From 6fbaebe07e0a9dc8cc10ddd429bf896b5ac64acb Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Wed, 24 Dec 2025 11:19:06 +0900 Subject: [PATCH 01/16] =?UTF-8?q?=F0=9F=94=A8=20=20A=20tiny=20refactoring?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/spectra.rst | 5 +++-- src/arpes/endstations/_helper/prodigy.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/source/spectra.rst b/docs/source/spectra.rst index 54418585..6d41abd5 100644 --- a/docs/source/spectra.rst +++ b/docs/source/spectra.rst @@ -68,8 +68,9 @@ otherwise recorded by default. Units ~~~~~ -Spatial and angular coordinates are reported in millimeters and radians -respectively. Temperatures are everywhere recorded in Kelvin. Relative +Spatial coordinates are reported in millimeters. Angular coordinates are reported in radians or degrees. +(Radians is the default unit, due to the historical reason.) Within a single Dataset/DataArray, the angular units must be consistent. +Temperatures are everywhere recorded in Kelvin. Relative times are reported in seconds. Currents are recorded in nanoamp unit. Pressures are recorded in torr. Potentials are recorded in volts. Laser pulse durations and other pump-probe quantities are reported in diff --git a/src/arpes/endstations/_helper/prodigy.py b/src/arpes/endstations/_helper/prodigy.py index bdd1ec16..3dedeb06 100644 --- a/src/arpes/endstations/_helper/prodigy.py +++ b/src/arpes/endstations/_helper/prodigy.py @@ -73,7 +73,7 @@ def parse_setscale( setscale = line.split(",", maxsplit=5) if "/I" in setscale[0]: flag = IgorSetscaleFlag.INCLUSIVE - elif "/P" in line: + elif "/P" in setscale[0]: flag = IgorSetscaleFlag.PERPOINTS else: flag = IgorSetscaleFlag.DEFAULT From d01788da4727eff96199e851ea0bf844c73ed48e Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 08:52:31 +0900 Subject: [PATCH 02/16] =?UTF-8?q?=F0=9F=8E=A8=20=20Follow=20the=20readthed?= =?UTF-8?q?ocs=20change?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.rst | 12 ++++++------ docs/source/index.rst | 2 +- docs/source/installation.rst | 2 +- .../notebooks/custom-dot-s-functionality.ipynb | 2 +- {docs => legacy_files}/netlify-redirect/index.html | 0 5 files changed, 9 insertions(+), 9 deletions(-) rename {docs => legacy_files}/netlify-redirect/index.html (100%) diff --git a/README.rst b/README.rst index 744a3889..76b03756 100644 --- a/README.rst +++ b/README.rst @@ -5,13 +5,13 @@ +-----------------------+ .. |Documentation| image:: https://img.shields.io/badge/api-reference-blue.svg - :target: https://arpes-v4.readthedocs.io/en/daredevil/ + :target: https://arpes-corrected.readthedocs.io/en/latest/ |coverage| |docs_status| |code_format| |code style| |uv| -.. |docs_status| image:: https://readthedocs.org/projects/arpes-v4/badge/?version=stable&style=flat - :target: https://arpes-v4.readthedocs.io/en/stable/ +.. |docs_status| image:: https://readthedocs.org/projects/arpes-corrected/badge/?version=stable&style=flat + :target: https://arpes-corrected.readthedocs.io/en/latest/ .. |coverage| image:: https://codecov.io/gh/arafune/arpes/graph/badge.svg?token=TW9EPVB1VE :target: https://app.codecov.io/gh/arafune/arpes .. |code style| image:: https://img.shields.io/badge/code%20style-black-000000.svg @@ -155,7 +155,7 @@ PyArpes contribution after `cadaaae`_, |copy| 2023-2025 by Ryuichi Arafune, all .. 
_cadaaae: https://github.com/arafune/arpes/commit/cadaaae0525d0889ef030cf18cf049da8fec2ee3 .. _Jupyter: https://jupyter.org/ -.. _the documentation site: https://arpes-v4.readthedocs.io/en/daredevil -.. _contributing: https://arpes-v4.readthedocs.io/en/daredevil/contributing.html -.. _FAQ: https://arpes-v4.readthedocs.io/en/daredevil/faq.html +.. _the documentation site: https://arpes-corrected.readthedocs.io/en/latest +.. _contributing: https://arpes-corrected.readthedocs.io/en/latest/contributing.html +.. _FAQ: https://arpes-corrected.readthedocs.io/en/latest/faq.html diff --git a/docs/source/index.rst b/docs/source/index.rst index 7dcbdc5a..72038cf2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -118,7 +118,7 @@ Gettinng started ================ See the section on the docs site about -`contributing `__ for +`contributing `__ for information on adding to PyARPES and rebuilding documentation from source. diff --git a/docs/source/installation.rst b/docs/source/installation.rst index afbdf488..4d250d30 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -49,7 +49,7 @@ Additional Suggested Steps 1. Install and configure standard tools like `Jupyter `__ or `Jupyter Lab `__. 2. Explore the documentation and example notebooks at - `the documentation site `__. + `the documentation site `__. Barebones kernel installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/notebooks/custom-dot-s-functionality.ipynb b/docs/source/notebooks/custom-dot-s-functionality.ipynb index 3530d3c9..1b88a1c3 100644 --- a/docs/source/notebooks/custom-dot-s-functionality.ipynb +++ b/docs/source/notebooks/custom-dot-s-functionality.ipynb @@ -208,7 +208,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.9" + "version": "3.12.12" } }, "nbformat": 4, diff --git a/docs/netlify-redirect/index.html b/legacy_files/netlify-redirect/index.html similarity index 100% rename from docs/netlify-redirect/index.html rename to legacy_files/netlify-redirect/index.html From 41021dff691a3ae094e76e32df4e9b0b3c3970ce Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 09:07:23 +0900 Subject: [PATCH 03/16] =?UTF-8?q?=E2=9C=85=20=20Change=20the=20example=5Fi?= =?UTF-8?q?tx=5Fdata.itx=20to=20improve=20coverage?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/endstations/prodigy_itx.py | 6 ++---- src/arpes/example_data/example_itx_data.itx | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/arpes/endstations/prodigy_itx.py b/src/arpes/endstations/prodigy_itx.py index 74c4cf66..e145e1b4 100644 --- a/src/arpes/endstations/prodigy_itx.py +++ b/src/arpes/endstations/prodigy_itx.py @@ -144,10 +144,8 @@ def create_coords( ) dims.append(coord) attrs = {**common_attrs, **self.params} - if "y" in self.axis_info: - attrs["enegy_unit"] = self.axis_info["y"][3] - if "d" in self.axis_info: - attrs["count_unit"] = self.axis_info["d"][3] + attrs["enegy_unit"] = self.axis_info["y"][3] if "y" in self.axis_info else "" + attrs["count_unit"] = self.axis_info["d"][3] if "d" in self.axis_info else "" logger.debug(f"dims: {dims}") data_array = xr.DataArray( data=self.intensity.reshape(_pixel_to_shape(self.pixels)), diff --git a/src/arpes/example_data/example_itx_data.itx b/src/arpes/example_data/example_itx_data.itx index ad333ac4..bd0da089 100644 --- a/src/arpes/example_data/example_itx_data.itx +++ b/src/arpes/example_data/example_itx_data.itx @@ -625,5 +625,5 
@@ BEGIN 0.274039 0.115863 0.348158 0.713136 0.955974 0.584256 0.583055 1.11135 0.766014 0.199213 0.497273 0.42919 0.447345 0.730286 0.165027 0.168681 0.758749 0.699118 0.250349 0.132319 0.477966 0.586581 1.23268 0.483255 0.24662 0.632838 0.358728 0.812116 1.03068 0.889161 0.525775 1.44138 1.16139 1.32711 0.599932 0.460356 1.15028 1.53559 1.27454 1.01025 1.05624 0.584066 1.17747 1.29634 1.50223 1.38121 1.44272 1.04916 0.464527 0.879684 1.31316 1.32088 1.00366 1.22117 1.09 1.14625 1.46556 0.809633 0.437844 0.576743 0.960247 0.600766 0.592159 1.61383 1.02878 1.42063 0.568872 0.986587 1.22306 1.84314 1.07813 1.27581 1.62267 1.02905 1.3886 0.709367 0.344146 0.970013 1.05933 0.838042 0.474599 0.919315 0.870507 0.591003 1.20265 1.05861 0.411396 0.775442 1.44256 0.51916 1.12087 0.833607 1.04117 0.989724 0.506702 0.427742 0.900676 0.94825 0.714519 0.809905 0.478169 1.12515 0.781224 0.877915 1.24629 1.23438 0.510776 0.0352878 0.274707 0.731957 0.880471 0.282435 0.358603 0.98995 0.990588 0.446899 0.622759 0.703334 1.80277 0.933778 0.894365 0.717716 1.05021 0.497535 0.59788 1.12457 1.1041 1.37907 1.59456 1.0776 0.937873 0.845882 1.25975 0.712356 0.546325 0.709325 0.905706 1.07814 1.01381 0.656445 0.491125 1.50404 1.01079 0.270623 1.2185 0.987942 0.96135 1.07488 1.69152 2.15681 2.629 1.47082 1.41801 1.45056 2.04316 1.39825 1.07048 0.858155 1.17969 2.02529 1.52289 1.48834 0.762617 1.40547 1.66869 2.14526 1.39104 1.94192 2.66106 1.99819 2.57547 1.03153 1.82183 1.77829 1.6541 1.29456 2.07637 2.44906 2.056 1.22003 2.03997 2.25513 2.65658 2.17786 1.99212 1.44892 1.67984 2.02486 1.7661 2.36643 3.22424 2.45754 2.22447 2.55196 2.61053 2.25475 3.80575 2.64028 2.50638 2.05473 2.62353 2.97928 4.6426 3.5395 3.06771 3.47369 4.70797 3.9026 3.56925 2.68289 3.62707 6.11177 5.87746 3.93961 5.84693 5.68526 5.62638 4.89216 6.50003 5.85347 3.58903 5.05456 4.89508 5.21554 6.21703 5.70538 5.72957 4.76544 5.39205 4.75235 4.34744 6.44015 5.91789 5.34225 5.28131 5.11058 5.04659 5.6525 4.86138 7.85632 4.67533 4.60702 4.90734 5.30632 4.6216 3.4855 3.1231 4.69893 4.33583 4.71375 4.50533 4.72695 3.93367 3.54464 2.78135 3.27373 2.59168 2.1739 2.4345 3.60129 4.36678 2.59503 2.57146 2.70817 2.88901 2.53639 2.01021 2.31564 2.95197 2.22361 2.43885 1.66209 1.29843 1.60794 1.50856 1.54616 1.20434 1.64664 1.30892 0.910971 1.60049 0.978165 1.7723 1.38915 0.694181 1.78558 0.989398 2.03052 0.840983 0.531656 0.414693 0.416066 1.17089 1.1323 0.740096 0.931871 0.431251 0.92789 0.901839 0.97152 0.137078 0.423667 0.404299 0.651435 0.263614 0.48393 0.477967 0.477088 0.18476 0.270058 0.517737 1.17044 0.382106 0.271284 0.44017 0.880654 0.806113 0.889902 0.412837 0.649843 0.644628 0.344474 0.75637 0.280786 0.405359 0.460666 0.849358 0.280851 0.362969 0.441075 1.05605 0.729989 0.355847 0.450823 0.622847 0.403726 0.464708 0.560829 0.43103 0.8228 0.438708 0.283933 0.525103 0.329381 0.28878 0.2835 0.478929 0.582876 0.327107 0.841646 0.982231 0.321725 0.148518 0.627247 0.750369 0.422872 0.709334 0.0351008 0.316185 0.196628 0.396039 0.348785 0.17732 0.179863 0.226762 0.145048 0.22236 0.132547 0.395003 0.628874 0.261214 0.427011 0.419704 0.105537 0.249597 0.529577 0.140662 0.0777095 0.256547 0.127936 0.547308 0.207995 0.0192262 0.0799035 0.167346 0.94362 0.191747 0.313098 0.511418 0.461811 0.342301 0.641493 0.0341492 0.130888 0.310835 0.557857 0.345678 0.666591 0.393191 0.397337 0.0248489 0.363116 0.211355 0.436073 0.155487 0.474471 0.870699 0.391538 0.72507 0.340949 0.231031 0.451327 0.166878 0.703413 0.209007 0.0204047 0.605467 0.407296 0.135366 0.315204 
0.875885 0.478064 0.181735 0.298111 0.518159 0.216315 0.370569 0.637524 0.623101 0.690299 0.269424 0.555545 0.846317 1.06504 1.14427 0.824542 1.1437 1.48521 1.61824 1.21697 0.972239 1.22984 1.82851 1.68882 1.40883 2.38864 3.429 3.83011 3.64742 2.41916 4.65512 4.97714 3.69391 2.92931 4.44436 2.58115 3.10802 2.88045 1.34125 2.47938 2.54658 1.49825 1.07653 1.2853 0.839009 0.329086 0.426268 0.580159 0.5791 0.455692 0.340836 0.0012182 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.07717 0.254028 0.24977 0.108845 0 0 0 0 0 0 0 END X SetScale/I x, -12.4792, 12.4792, "deg (theta_y)", 'Spectrum_3_2' -X SetScale/I y, 9, 10, "eV (Kinetic Energy)", 'Spectrum_3_2' -X SetScale/I d, 0, 22.8718, "cps (Intensity)", 'Spectrum_3_2' +X SetScale/P y, 9, 0.002, "eV (Kinetic Energy)", 'Spectrum_3_2' +X SetScale d, 0, 22.8718, "cps (Intensity)", 'Spectrum_3_2' From efb792dd6b98df8422ccf08d345662fe1569b9c3 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 10:53:30 +0900 Subject: [PATCH 04/16] =?UTF-8?q?=F0=9F=8E=89=20=20Bump=20version=20to=205?= =?UTF-8?q?.0.3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/CHANGELOG.rst | 4 ++++ src/arpes/endstations/prodigy_itx.py | 28 ++++++++++++++-------------- src/arpes/setting.py | 2 +- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/docs/source/CHANGELOG.rst b/docs/source/CHANGELOG.rst index 862d1ace..e3156ac8 100644 --- a/docs/source/CHANGELOG.rst +++ b/docs/source/CHANGELOG.rst @@ -9,6 +9,10 @@ Primary (X.-.-) version numbers are used to denote backwards incompatibilities between versions, while minor (-.X.-) numbers primarily indicate new features and documentation. +5.0.3 (2026-XX-XX) +^^^^^^^^^^^^^^^^^^ + + 5.0.2 (2025-12-23) ^^^^^^^^^^^^^^^^^^ diff --git a/src/arpes/endstations/prodigy_itx.py b/src/arpes/endstations/prodigy_itx.py index 74c4cf66..a81eb530 100644 --- a/src/arpes/endstations/prodigy_itx.py +++ b/src/arpes/endstations/prodigy_itx.py @@ -108,19 +108,6 @@ def to_dataarray( Returns: xr.DataArray: pyarpess compatibility """ - - def create_coords( - axis_info: tuple[IgorSetscaleFlag, float, float, str], - pixels: int, - ) -> NDArray[np.float64]: - """Create coordinate array from the axis_info.""" - flag, start, delta_or_end, _ = axis_info - return flag.set_scale( - num1=float(start), - num2=float(delta_or_end), - pixels=pixels, - ) - common_attrs: dict[str, str | float] = { "spectrum_type": "cut", "angle_unit": "deg (theta_y)", @@ -138,7 +125,7 @@ def create_coords( for key, (coord, pix) in axis_defs.items(): if key in self.axis_info: - coords[coord] = create_coords( + coords[coord] = _create_coord( axis_info=self.axis_info[key], pixels=self.pixels[pix], ) @@ -320,3 +307,16 @@ def _parse_user_comment( else: common_params[item] = True return common_params + + +def _create_coord( + axis_info: tuple[IgorSetscaleFlag, float, float, str], + pixels: int, +) -> NDArray[np.float64]: + """Create coordinate array from the axis_info.""" + flag, start, delta_or_end, _ = axis_info + return flag.set_scale( + num1=float(start), + num2=float(delta_or_end), + pixels=pixels, + ) diff --git a/src/arpes/setting.py b/src/arpes/setting.py index 3303c962..132d2ade 100644 --- a/src/arpes/setting.py +++ b/src/arpes/setting.py @@ -13,4 +13,4 @@ # Base paths SOURCE_ROOT: str = str(Path(__file__).resolve().parent) -VERSION = "5.0.2" +VERSION = "5.0.3" From 203bab9439c4ed655962905c7e5de73688b99230 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 11:23:45 +0900 Subject: [PATCH 
05/16] =?UTF-8?q?=E2=9C=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/test_correction_intensity_map.py | 30 +++++++++++++++----------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/test_correction_intensity_map.py b/tests/test_correction_intensity_map.py index 474dfd61..513ce75b 100644 --- a/tests/test_correction_intensity_map.py +++ b/tests/test_correction_intensity_map.py @@ -6,7 +6,7 @@ @pytest.fixture -def sample_data(): +def sample_data() -> xr.DataArray: x = np.linspace(0, 10, 11) y = np.linspace(0, 5, 6) z = np.random.rand(len(y), len(x)) @@ -14,7 +14,7 @@ def sample_data(): @pytest.fixture -def sample_data3D(): +def sample_data3D() -> xr.DataArray: x = np.linspace(0, 10, 11) y = np.linspace(0, 5, 6) w = np.linspace(0, 1, 6) # Adding a third dimension for testing @@ -22,9 +22,11 @@ def sample_data3D(): return xr.DataArray(z, coords={"y": y, "x": x, "w": w}, dims=["y", "x", "w"]) -def test_shift_with_xrdataarray(sample_data): +def test_shift_with_xrdataarray(sample_data: xr.DataArray): shift_vals = xr.DataArray( - np.ones(sample_data.sizes["y"]), coords={"y": sample_data.coords["y"]}, dims=["y"], + np.ones(sample_data.sizes["y"]), + coords={"y": sample_data.coords["y"]}, + dims=["y"], ) out = shift(sample_data, shift_vals, shift_axis="x", shift_coords=False) assert isinstance(out, xr.DataArray) @@ -32,28 +34,30 @@ def test_shift_with_xrdataarray(sample_data): np.testing.assert_array_equal(out.coords["y"], sample_data.coords["y"]) -def test_shift_with_xrdataarray_shift_coords(sample_data): +def test_shift_with_xrdataarray_shift_coords(sample_data: xr.DataArray): shift_vals = xr.DataArray( - np.ones(sample_data.sizes["y"]), coords={"y": sample_data.coords["y"]}, dims=["y"], + np.ones(sample_data.sizes["y"]), + coords={"y": sample_data.coords["y"]}, + dims=["y"], ) out = shift(sample_data, shift_vals, shift_axis="x", shift_coords=True) assert not np.allclose(out.coords["x"], sample_data.coords["x"]) -def test_shift_with_ndarray(sample_data): +def test_shift_with_ndarray(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y") assert out.shape == sample_data.shape -def test_shift_with_ndarray_missing_by_axis(sample_data): +def test_shift_with_ndarray_missing_by_axis(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) # This should succeed because the function infers by_axis when 2D out = shift(sample_data, shift_vals, shift_axis="x") assert out.shape == sample_data.shape -def test_shift_coords_alignment(sample_data): +def test_shift_coords_alignment(sample_data: xr.DataArray): shift_vals = np.linspace(-1, 1, sample_data.sizes["y"]) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", shift_coords=True) mean_shift = np.mean(shift_vals) @@ -61,25 +65,25 @@ def test_shift_coords_alignment(sample_data): np.testing.assert_allclose(out.coords["x"], expected_coords, atol=1e-6) -def test_shift_extend_coords_min(sample_data): +def test_shift_extend_coords_min(sample_data: xr.DataArray): shift_vals = np.full(sample_data.sizes["y"], 5.0) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", extend_coords=True) assert out.sizes["x"] > sample_data.sizes["x"] -def test_shift_extend_coords_max(sample_data): +def test_shift_extend_coords_max(sample_data: xr.DataArray): shift_vals = np.full(sample_data.sizes["y"], -5.0) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", 
extend_coords=True) assert out.sizes["x"] > sample_data.sizes["x"] -def test_shift_axis_required(sample_data): +def test_shift_axis_required(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) with pytest.raises(AssertionError): shift(sample_data, shift_vals, shift_axis="") -def test_shift_by_axis_required_for_ndarray(sample_data3D): +def test_shift_by_axis_required_for_ndarray(sample_data3D: xr.DataArray): shift_vals = np.ones(sample_data3D.sizes["x"]) # Not matching y with pytest.raises(TypeError): shift(sample_data3D, shift_vals, shift_axis="y") From 0f27ee6560b42189c548aeea8f1ecbfba03f7499 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 11:34:07 +0900 Subject: [PATCH 06/16] =?UTF-8?q?=E2=9C=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/test_correction_intensity_map.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_correction_intensity_map.py b/tests/test_correction_intensity_map.py index 513ce75b..ee875be9 100644 --- a/tests/test_correction_intensity_map.py +++ b/tests/test_correction_intensity_map.py @@ -14,7 +14,7 @@ def sample_data() -> xr.DataArray: @pytest.fixture -def sample_data3D() -> xr.DataArray: +def sample_data3d() -> xr.DataArray: x = np.linspace(0, 10, 11) y = np.linspace(0, 5, 6) w = np.linspace(0, 1, 6) # Adding a third dimension for testing @@ -83,10 +83,10 @@ def test_shift_axis_required(sample_data: xr.DataArray): shift(sample_data, shift_vals, shift_axis="") -def test_shift_by_axis_required_for_ndarray(sample_data3D: xr.DataArray): - shift_vals = np.ones(sample_data3D.sizes["x"]) # Not matching y +def test_shift_by_axis_required_for_ndarray(sample_data3d: xr.DataArray): + shift_vals = np.ones(sample_data3d.sizes["x"]) # Not matching y with pytest.raises(TypeError): - shift(sample_data3D, shift_vals, shift_axis="y") + shift(sample_data3d, shift_vals, shift_axis="y") def test_shift_with_integer_array(): From fbcb354acfa028b3f9610dd004c992a84316d578 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 16:46:45 +0900 Subject: [PATCH 07/16] =?UTF-8?q?=F0=9F=92=A5=20=20convert=5Fto=5Fkspace?= =?UTF-8?q?=20is=20moved=20to=20api.py=20from=20core.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/analysis/forward_conversion.py | 2 +- src/arpes/analysis/path.py | 2 +- src/arpes/correction/trapezoid.py | 2 +- .../coordinates.py} | 0 src/arpes/plotting/dispersion.py | 2 +- src/arpes/utilities/conversion/__init__.py | 3 +- src/arpes/utilities/conversion/api.py | 275 ++++++++++++++ src/arpes/utilities/conversion/coordinates.py | 254 +++++++++++++ src/arpes/utilities/conversion/core.py | 350 +----------------- tests/conftest.py | 10 +- tests/test_basic_data_loading.py | 58 +-- tests/test_conversion_core.py | 6 +- 12 files changed, 575 insertions(+), 389 deletions(-) rename src/arpes/{utilities/conversion/remap_manipulator.py => plotting/coordinates.py} (100%) create mode 100644 src/arpes/utilities/conversion/api.py create mode 100644 src/arpes/utilities/conversion/coordinates.py diff --git a/src/arpes/analysis/forward_conversion.py b/src/arpes/analysis/forward_conversion.py index 59aa0fa0..4bb3833e 100644 --- a/src/arpes/analysis/forward_conversion.py +++ b/src/arpes/analysis/forward_conversion.py @@ -25,13 +25,13 @@ from arpes.debug import setup_logger from arpes.provenance import update_provenance from arpes.utilities import normalize_to_spectrum +from 
arpes.utilities.conversion.api import convert_to_kspace from arpes.utilities.conversion.bounds_calculations import ( euler_to_kx, euler_to_ky, euler_to_kz, full_angles_to_k, ) -from arpes.utilities.conversion.core import convert_to_kspace from arpes.xarray_extensions.accessor.spectrum_type import EnergyNotation, SpectrumType if TYPE_CHECKING: diff --git a/src/arpes/analysis/path.py b/src/arpes/analysis/path.py index 34725780..d5b55341 100644 --- a/src/arpes/analysis/path.py +++ b/src/arpes/analysis/path.py @@ -21,7 +21,7 @@ from arpes.debug import setup_logger from arpes.provenance import Provenance, provenance -from arpes.utilities.conversion.core import convert_coordinates +from arpes.utilities.conversion.coordinates import convert_coordinates from arpes.utilities.conversion.grids import determine_axis_type LOGLEVELS = (DEBUG, INFO) diff --git a/src/arpes/correction/trapezoid.py b/src/arpes/correction/trapezoid.py index 04a177b7..619c4a05 100644 --- a/src/arpes/correction/trapezoid.py +++ b/src/arpes/correction/trapezoid.py @@ -26,7 +26,7 @@ from arpes.debug import setup_logger from arpes.utilities.conversion.base import CoordinateConverter -from arpes.utilities.conversion.core import convert_coordinates +from arpes.utilities.conversion.coordinates import convert_coordinates if TYPE_CHECKING: from collections.abc import Callable, Hashable diff --git a/src/arpes/utilities/conversion/remap_manipulator.py b/src/arpes/plotting/coordinates.py similarity index 100% rename from src/arpes/utilities/conversion/remap_manipulator.py rename to src/arpes/plotting/coordinates.py diff --git a/src/arpes/plotting/dispersion.py b/src/arpes/plotting/dispersion.py index 86ab75a2..f2e027ec 100644 --- a/src/arpes/plotting/dispersion.py +++ b/src/arpes/plotting/dispersion.py @@ -14,10 +14,10 @@ from arpes.analysis.path import slice_along_path from arpes.io import load_data +from arpes.plotting.coordinates import remap_coords_to from arpes.preparation import normalize_dim from arpes.provenance import save_plot_provenance from arpes.utilities import bz -from arpes.utilities.conversion import remap_coords_to from .utils import label_for_colorbar, label_for_dim, label_for_symmetry_point, path_for_plot diff --git a/src/arpes/utilities/conversion/__init__.py b/src/arpes/utilities/conversion/__init__.py index c008b203..dba12b67 100644 --- a/src/arpes/utilities/conversion/__init__.py +++ b/src/arpes/utilities/conversion/__init__.py @@ -2,6 +2,5 @@ from __future__ import annotations +from .api import convert_to_kspace from .calibration import DetectorCalibration -from .core import convert_to_kspace -from .remap_manipulator import remap_coords_to diff --git a/src/arpes/utilities/conversion/api.py b/src/arpes/utilities/conversion/api.py new file mode 100644 index 00000000..3ffa58ab --- /dev/null +++ b/src/arpes/utilities/conversion/api.py @@ -0,0 +1,275 @@ +"""Public API for momentum-space (k-space) conversion of ARPES data. + +This module defines the user-facing entry points for converting ARPES +data from angle/energy space into momentum space. The functions provided +here are stable, documented, and intended to be imported and used directly +by analysis scripts and notebooks. + +Design principles: + - This module exposes *high-level orchestration functions* only. + - Implementation details (coordinate transforms, interpolation, + grid construction) are delegated to lower-level modules. + - Functions here operate on `xarray.DataArray` objects and return + new objects without modifying inputs in place. 
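For downstream code, the visible consequence of this refactor is the import path. A minimal migration sketch, assuming only what this patch series shows (the package ``__init__`` now re-exports ``convert_to_kspace`` from ``.api``)::

    # Old import path (removed by this patch):
    # from arpes.utilities.conversion.core import convert_to_kspace

    # New import paths; the package-level re-export is the stable choice:
    from arpes.utilities.conversion import convert_to_kspace
    # or, explicitly:
    from arpes.utilities.conversion.api import convert_to_kspace

The function body itself is moved verbatim, so call sites should need no change beyond the import.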
+ +Scope: + - Backward (interpolating) k-space conversion via `convert_to_kspace` + - Chunk-aware conversion for large energy stacks + - Provenance tracking for reproducibility + +Non-goals: + - This module does not implement low-level coordinate transforms. + - This module does not define interpolation kernels or numerical solvers. + - Forward (coordinate-only) conversion routines live elsewhere. + +Typical usage: + >>> from arpes.utilities.conversion.api import convert_to_kspace + >>> kdata = convert_to_kspace(data, kx=..., ky=...) + +Module structure: + - convert_to_kspace: + Primary public entry point for k-space conversion. + - _chunk_convert: + Internal helper for chunk-wise conversion (not part of the public API). + +See Also: + - arpes.utilities.conversion.coordinates: + Low-level coordinate transformation utilities. + - arpes.utilities.conversion.core: + Internal conversion machinery and interpolation logic. +""" + +from __future__ import annotations + +import warnings +from logging import DEBUG, INFO +from typing import TYPE_CHECKING, Unpack + +import numpy as np +import xarray as xr + +from arpes.debug import setup_logger +from arpes.provenance import update_provenance +from arpes.utilities import normalize_to_spectrum +from arpes.xarray_extensions.accessor.spectrum_type import AngleUnit + +from .coordinates import convert_coordinates +from .core import _is_dims_match_coordinate_convert +from .grids import ( + determine_momentum_axes_from_measurement_axes, + is_dimension_convertible_to_momentum, +) +from .kx_ky_conversion import ConvertKp, ConvertKxKy +from .kz_conversion import ConvertKpKz + +if TYPE_CHECKING: + from collections.abc import Hashable + + from numpy.typing import NDArray + + from arpes._typing.attrs_property import KspaceCoords + from arpes._typing.base import MOMENTUM + from arpes.utilities.conversion.base import CoordinateConverter + from arpes.utilities.conversion.calibration import DetectorCalibration + + +LOGLEVELS = (DEBUG, INFO) +LOGLEVEL = LOGLEVELS[1] +logger = setup_logger(__name__, LOGLEVEL) + + +@update_provenance("Automatically k-space converted") +def convert_to_kspace( # noqa: PLR0913 + arr: xr.DataArray, + *, + bounds: dict[MOMENTUM, tuple[float, float]] | None = None, + resolution: dict[MOMENTUM, float] | None = None, + calibration: DetectorCalibration | None = None, + coords: KspaceCoords | None = None, + allow_chunks: bool = False, + **kwargs: Unpack[KspaceCoords], +) -> xr.DataArray: + """Converts volumetric the data to momentum space ("backwards"). Typically what you want. + + Works in general by regridding the data into the new coordinate space and then + interpolating back into the original data. + + For forward conversion, see sibling methods. Forward conversion works by + totally unchanged by the conversion (if we do not apply a Jacobian correction), but the + converting the coordinates, rather than by interpolating the data. As a result, the data will be + coordinates will no longer have equal spacing. + + This is only really useful for zero and one dimensional data because for two dimensional data, + the coordinates must become two dimensional in order to fully specify every data point + (this is true in generality, in 3D the coordinates must become 3D as well). + + The only exception to this is if the extra axes do not need to be k-space converted. As is the + case where one of the dimensions is `cycle` or `delay`, for instance. 
+ + You can request a particular resolution for the new data with the `resolution=` parameter, + or a specific set of bounds with the `bounds=` + + Examples: + Convert a 2D cut with automatically inferred range and resolution. + + >>> convert_to_kspace(arpes.io.load_example_data()) # doctest: +SKIP + + xr.DataArray(...) + + Convert a 3D map with a specified momentum window + + >>> convert_to_kspace( # doctest: +SKIP + fermi_surface_map, + kx=np.linspace(-1, 1, 200), + ky=np.linspace(-1, 1, 350), + ) + xr.DataArray(...) + + Args: + arr (xr.DataArray): ARPES data + bounds (dict[MOMENTUM, tuple[float, float]], optional): + The key is the axis name. The value is the bounds. Defaults to {}. + If not set this arg, set coords. + resolution (dict[Momentum, float], optional): dict for the energy/angular resolution. + calibration (DetectorCalibration, optional): DetectorCalibration object. Defaults to None. + coords (KspaceCoords, optional): Coordinate of k-space. Defaults to {}. + allow_chunks (bool): [description]. Defaults to False. + **kwargs: treated as coords. + + Raises: + NotImplementedError: [description] + AnalysisError: [description] + ValueError: [description] + + Returns: + xr.DataArray: Converted ARPES (k-space) data. + """ + coords = {} if coords is None else coords + assert coords is not None + coords.update(kwargs) + + bounds = bounds or {} + arr = arr if isinstance(arr, xr.DataArray) else normalize_to_spectrum(arr) + assert isinstance(arr, xr.DataArray) + if arr.S.angle_unit is AngleUnit.DEG: + arr = arr.S.switched_angle_unit() + logger.debug(f"bounds (covnert_to_kspace): {bounds}") + logger.debug(f"keys in coords (convert_to_kspace): {coords.keys()}") + # Chunking logic + if allow_chunks and ("eV" in arr.dims) and len(arr.eV) > 50: # noqa: PLR2004 + return _chunk_convert( + arr=arr, + bounds=bounds, + resolution=resolution, + calibration=calibration, + coords=coords, + **kwargs, + ) + momentum_incompatibles: list[str] = [ + str(d) + for d in arr.dims + if not is_dimension_convertible_to_momentum(str(d)) and str(d) != "eV" + ] + momentum_compatibles: list[str] = sorted( # Literal["phi", "theta", "beta", "chi", "psi", "hv"] + [str(d) for d in arr.dims if is_dimension_convertible_to_momentum(str(d))], + ) + + if not momentum_compatibles: + return arr # no need to convert, might be XPS or similar + + converted_dims: list[str] = ( + (["eV"] if ("eV" in arr.dims) else []) + + determine_momentum_axes_from_measurement_axes( + momentum_compatibles, + ) # axis_names: list[Literal["phi", "beta", "psi", "theta", "hv"]], + + momentum_incompatibles + ) + + tupled_momentum_compatibles = tuple(momentum_compatibles) + convert_cls: type[ConvertKp | ConvertKxKy | ConvertKpKz] | None = None + if _is_dims_match_coordinate_convert(tupled_momentum_compatibles): + convert_cls = { + ("phi",): ConvertKp, + ("beta", "phi"): ConvertKxKy, + ("phi", "theta"): ConvertKxKy, + ("phi", "psi"): ConvertKxKy, + # ('chi', 'phi',): ConvertKxKy, + ("hv", "phi"): ConvertKpKz, + }.get(tupled_momentum_compatibles) + assert convert_cls is not None, "Cannot select convert class" + + converter: CoordinateConverter = convert_cls( + arr=arr, + dim_order=converted_dims, + calibration=calibration, + ) + + converted_coordinates: dict[Hashable, NDArray[np.float64]] = converter.get_coordinates( + resolution=resolution, + bounds=bounds, + ) + if not set(coords.keys()).issubset(converted_coordinates.keys()): + extra = set(coords.keys()).difference(converted_coordinates.keys()) + msg = f"Unexpected passed coordinates: {extra}" + raise 
ValueError(msg) + converted_coordinates.update(**coords) # type: ignore[misc] + result = convert_coordinates( + arr, + target_coordinates=converted_coordinates, + coordinate_transform={ + "dims": converted_dims, + "transforms": {str(dim): converter.conversion_for(dim) for dim in arr.dims}, + }, + ) + assert isinstance(result, xr.DataArray) + return result + + +def _chunk_convert( + arr: xr.DataArray, + bounds: dict[MOMENTUM, tuple[float, float]] | None = None, + resolution: dict[MOMENTUM, float] | None = None, + calibration: DetectorCalibration | None = None, + coords: KspaceCoords | None = None, + **kwargs: Unpack[KspaceCoords], +) -> xr.DataArray: + DESIRED_CHUNK_SIZE = 1000 * 1000 * 20 + TOO_LARGE_CHUNK_SIZE = 100 + n_chunks: np.int_ = np.prod(arr.shape) // DESIRED_CHUNK_SIZE + if n_chunks == 0: + warnings.warn( + "Data size is sufficiently small, set allow_chunks=False", + stacklevel=2, + ) + n_chunks += 1 + + if n_chunks > TOO_LARGE_CHUNK_SIZE: + warnings.warn( + "Input array is very large. Please consider resampling.", + stacklevel=2, + ) + chunk_thickness = np.max(len(arr.eV) // n_chunks, 1) + logger.debug(f"Chunking along energy: {n_chunks}, thickness {chunk_thickness}") + finished = [] + low_idx = 0 + high_idx = chunk_thickness + while low_idx < len(arr.eV): + chunk = arr.isel(eV=slice(low_idx, high_idx)) + if len(chunk.eV) == 1: + chunk = chunk.squeeze(dim="eV") + kchunk = convert_to_kspace( + arr=chunk, + bounds=bounds, + resolution=resolution, + calibration=calibration, + coords=coords, + allow_chunks=False, + **kwargs, + ) + if "eV" not in kchunk.dims: + kchunk = kchunk.expand_dims("eV") + assert isinstance(kchunk, xr.DataArray) + finished.append(kchunk) + low_idx = high_idx + high_idx = min(len(arr.eV), high_idx + chunk_thickness) + return xr.concat(finished, dim="eV") diff --git a/src/arpes/utilities/conversion/coordinates.py b/src/arpes/utilities/conversion/coordinates.py new file mode 100644 index 00000000..612b1267 --- /dev/null +++ b/src/arpes/utilities/conversion/coordinates.py @@ -0,0 +1,254 @@ +"""Low-level coordinate transformation utilities. + +This module contains internal, low-level routines used to convert ARPES data +between coordinate systems (e.g. angle space to momentum space) using +volumetric interpolation. + +⚠️ Design notes +---------------- +The functions defined here are **NOT part of the public API**. + +They intentionally expose implementation details such as: +- explicit target coordinate grids, +- transformation dictionaries, +- interpolation order and reshaping rules, +- assumptions about xarray dimensions and accessors. + +As a result: +- This module MUST NOT be re-exported from ``arpes.utilities.conversion.__init__``. +- Users should NOT import from this module directly. +- Backward compatibility is NOT guaranteed. + +Public-facing coordinate conversion functionality is provided instead via: +- higher-level analysis routines (e.g. ``convert_to_kspace``), +- xarray accessors (``DataArray.S``), +- or dedicated user APIs in the ``analysis`` layer. + +🧱 Architectural role +--------------------- +This module belongs to the **internal utility layer** and is designed to be: + +- shared by plotting and analysis code, +- flexible and powerful, +- free to evolve without API constraints. + +It should depend only on: +- NumPy, +- xarray, +- and other internal utilities, + +and must not introduce dependencies on higher-level modules such as +``analysis``, ``plotting``, or xarray accessors beyond what is strictly required. 
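To make the internal contract concrete before the definitions below, here is a minimal, hypothetical sketch of the specification that ``convert_coordinates`` consumes. The helper names (``phi_from_kp``, ``kp_identity``) are illustrative stand-ins only; real transforms are produced by converter classes such as ``ConvertKp``::

    import numpy as np

    # Each transform receives the raveled meshgrid arrays of the target
    # dims (here just "kp") and returns coordinate values as float64.
    def phi_from_kp(kp: np.ndarray) -> np.ndarray:
        # stand-in for the real momentum-to-angle inversion
        return np.arcsin(np.clip(kp / 2.0, -1.0, 1.0))

    def kp_identity(kp: np.ndarray) -> np.ndarray:
        return kp

    spec = {  # matches the CoordinateTransform shape defined below
        "dims": ["kp"],
        "transforms": {"phi": phi_from_kp, "kp": kp_identity},
    }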
+ +If you are looking for a user-facing function, this is probably not the module +you want to import. +""" + +from __future__ import annotations + +import contextlib +from logging import DEBUG, INFO +from typing import TYPE_CHECKING, TypedDict + +import numpy as np +import xarray as xr + +from arpes.debug import setup_logger + +from .core import grid_interpolator_from_dataarray +from .fast_interp import Interpolator + +if TYPE_CHECKING: + from collections.abc import Callable, Hashable + + from numpy.typing import NDArray + + from arpes._typing.base import XrTypes + + +LOGLEVELS = (DEBUG, INFO) +LOGLEVEL = LOGLEVELS[1] +logger = setup_logger(__name__, LOGLEVEL) + + +class CoordinateTransform(TypedDict, total=True): + """Internal specification of a coordinate transformation. + + This TypedDict defines the minimal contract required by the low-level + volumetric coordinate conversion engine. It describes: + + - the ordered set of target dimensions in the output space, and + - a mapping from each source/target dimension name to a callable + that computes the corresponding coordinate values. + + ⚠️ This is an internal data structure. + ------------------------------------ + It is **not part of the public API** and may change without notice. + Users should not construct or rely on this object directly. + + The structure is intentionally lightweight and flexible to support + different coordinate systems (e.g. angle space, momentum space) without + imposing a rigid class hierarchy. + + Fields + ------ + dims : list[str] or list[Hashable] + Ordered names of the target coordinate dimensions. + The order determines the shape and ordering of the output array. + + In most practical ARPES use cases, this will be something like:: + + ["kp"] # cut data + ["kx", "ky"] # 2D momentum maps + ["kx", "ky", "kz"] # 3D momentum volumes + + but no specific coordinate system is assumed at this level. + + transforms : dict[str, Callable[..., NDArray[np.float64]]] + Mapping from coordinate names to transformation functions. + + Each callable must accept a sequence of meshed coordinate arrays + (as produced by ``numpy.meshgrid``) and return a NumPy array of + transformed coordinate values. + + The keys of this dictionary are expected to include: + - all original dimensions of the input DataArray, and + - all target dimensions listed in ``dims``. + + Notes: + - No validation of physical correctness is performed here. + - The numerical meaning of the transforms is defined entirely by + the calling code. + - This structure is designed to support volumetric interpolation + workflows and should remain free of higher-level concepts such as + spectrum type, plotting logic, or experiment metadata. + """ + + dims: list[str] | list[Hashable] # in most case dims should be Literal["kp", "kx", "ky", "kz"]] + transforms: dict[str, Callable[..., NDArray[np.float64]]] + + +def convert_coordinates( + arr: xr.DataArray, + target_coordinates: dict[Hashable, NDArray[np.float64]], + coordinate_transform: CoordinateTransform, + *, + as_dataset: bool = False, +) -> XrTypes: + """Return the band structure data (converted to k-space). + + Args: + arr(xr.DataArray): ARPES data + target_coordinates:(dict[Hashable, NDArray[np.float64]]): coorrdinate for ... + coordinate_transform(dict[str, list[str] | Callable]): coordinat for ... 
+ as_dataset(bool): if True, return the data as the dataSet + + Returns: + XrTypes + """ + assert isinstance(arr, xr.DataArray) + ordered_source_dimensions = arr.dims + + grid_interpolator = grid_interpolator_from_dataarray( + arr.transpose(*ordered_source_dimensions), # TODO(RA): No need? -- perhaps no. + fill_value=np.nan, + ) + + # Skip the Jacobian correction for now + # Convert the raw coordinate axes to a set of gridded points + logger.debug( + f"meshgrid: {[len(target_coordinates[dim]) for dim in coordinate_transform['dims']]}", + ) + meshed_coordinates = [ + meshed_coord.ravel() + for meshed_coord in np.meshgrid( + *[target_coordinates[dim] for dim in coordinate_transform["dims"]], + indexing="ij", + ) + ] + + if "eV" not in arr.dims: + with contextlib.suppress(ValueError): + meshed_coordinates = [arr.S.lookup_offset_coord("eV"), *meshed_coordinates] + old_coord_names = [str(dim) for dim in arr.dims if dim not in target_coordinates] + assert isinstance(coordinate_transform["transforms"], dict) + transforms: dict[str, Callable[..., NDArray[np.float64]]] = coordinate_transform["transforms"] + logger.debug(f"transforms is {transforms}") + old_coordinate_transforms = [ + transforms[str(dim)] for dim in arr.dims if dim not in target_coordinates + ] + logger.debug(f"old_coordinate_transforms: {old_coordinate_transforms}") + + output_shape = [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]] + + def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray[np.float64]: + logger.debug(f"transform function is {transform}") + return np.reshape( + transform(*meshed_coordinates), + output_shape, + order="C", + ) + + old_dimensions = [compute_coordinate(tr) for tr in old_coordinate_transforms] + + ordered_transformations = [transforms[str(dim)] for dim in arr.dims] + transformed_coordinates = [tr(*meshed_coordinates) for tr in ordered_transformations] + + converted_volume = ( + grid_interpolator(np.array(transformed_coordinates).T) + if not isinstance(grid_interpolator, Interpolator) + else grid_interpolator(transformed_coordinates) + ) + + # Wrap it all up + def acceptable_coordinate(c: NDArray[np.float64] | xr.DataArray) -> bool: + """Return True if the dim of array is subset of dim of coordinate_transform. + + Currently we do this to filter out coordinates + that are functions of the old angular dimensions, + we could forward convert these, but right now we do not + + Args: + c (xr.DataArray): DataArray for check. + + Returns: bool + Return True if the dim of array is subset of dim of coordinate_transform. 
+ """ + if isinstance(c, xr.DataArray): + return set(c.dims).issubset(coordinate_transform["dims"]) + return True + + target_coordinates = {k: v for k, v in target_coordinates.items() if acceptable_coordinate(v)} + data = xr.DataArray( + np.reshape( + converted_volume, + [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]], + order="C", + ), + target_coordinates, + coordinate_transform["dims"], + attrs=arr.attrs, + ) + if as_dataset: + old_mapped_coords = [ + xr.DataArray( + values, + coords=target_coordinates, + dims=coordinate_transform["dims"], + attrs=arr.attrs, + ) + for values in old_dimensions + ] + variables = {"data": data} + variables.update( + dict( + zip( + old_coord_names, + old_mapped_coords, + strict=True, + ), + ), + ) + return xr.Dataset(variables, attrs=arr.attrs) + + return data diff --git a/src/arpes/utilities/conversion/core.py b/src/arpes/utilities/conversion/core.py index ac36cf19..416eecb9 100644 --- a/src/arpes/utilities/conversion/core.py +++ b/src/arpes/utilities/conversion/core.py @@ -22,40 +22,20 @@ from __future__ import annotations -import contextlib -import warnings from logging import DEBUG, INFO -from typing import TYPE_CHECKING, Literal, TypedDict, TypeGuard, Unpack +from typing import TYPE_CHECKING, Literal, TypeGuard import numpy as np import xarray as xr from scipy.interpolate import RegularGridInterpolator from arpes.debug import setup_logger -from arpes.provenance import update_provenance -from arpes.utilities import normalize_to_spectrum -from arpes.xarray_extensions.accessor.spectrum_type import AngleUnit from .fast_interp import Interpolator -from .grids import ( - determine_momentum_axes_from_measurement_axes, - is_dimension_convertible_to_momentum, -) -from .kx_ky_conversion import ConvertKp, ConvertKxKy -from .kz_conversion import ConvertKpKz if TYPE_CHECKING: - from collections.abc import Callable, Hashable - from numpy.typing import NDArray - from arpes._typing.attrs_property import KspaceCoords - from arpes._typing.base import MOMENTUM, XrTypes - from arpes.utilities.conversion.base import CoordinateConverter - from arpes.utilities.conversion.calibration import DetectorCalibration - -__all__ = ["convert_to_kspace"] - LOGLEVELS = (DEBUG, INFO) LOGLEVEL = LOGLEVELS[1] @@ -115,331 +95,3 @@ def _is_dims_match_coordinate_convert( ("phi", "psi"), ("hv", "phi"), } - - -@update_provenance("Automatically k-space converted") -def convert_to_kspace( # noqa: PLR0913 - arr: xr.DataArray, - *, - bounds: dict[MOMENTUM, tuple[float, float]] | None = None, - resolution: dict[MOMENTUM, float] | None = None, - calibration: DetectorCalibration | None = None, - coords: KspaceCoords | None = None, - allow_chunks: bool = False, - **kwargs: Unpack[KspaceCoords], -) -> xr.DataArray: - """Converts volumetric the data to momentum space ("backwards"). Typically what you want. - - Works in general by regridding the data into the new coordinate space and then - interpolating back into the original data. - - For forward conversion, see sibling methods. Forward conversion works by - totally unchanged by the conversion (if we do not apply a Jacobian correction), but the - converting the coordinates, rather than by interpolating the data. As a result, the data will be - coordinates will no longer have equal spacing. 
- - This is only really useful for zero and one dimensional data because for two dimensional data, - the coordinates must become two dimensional in order to fully specify every data point - (this is true in generality, in 3D the coordinates must become 3D as well). - - The only exception to this is if the extra axes do not need to be k-space converted. As is the - case where one of the dimensions is `cycle` or `delay`, for instance. - - You can request a particular resolution for the new data with the `resolution=` parameter, - or a specific set of bounds with the `bounds=` - - Examples: - Convert a 2D cut with automatically inferred range and resolution. - - >>> convert_to_kspace(arpes.io.load_example_data()) # doctest: +SKIP - - xr.DataArray(...) - - Convert a 3D map with a specified momentum window - - >>> convert_to_kspace( # doctest: +SKIP - fermi_surface_map, - kx=np.linspace(-1, 1, 200), - ky=np.linspace(-1, 1, 350), - ) - xr.DataArray(...) - - Args: - arr (xr.DataArray): ARPES data - bounds (dict[MOMENTUM, tuple[float, float]], optional): - The key is the axis name. The value is the bounds. Defaults to {}. - If not set this arg, set coords. - resolution (dict[Momentum, float], optional): dict for the energy/angular resolution. - calibration (DetectorCalibration, optional): DetectorCalibration object. Defaults to None. - coords (KspaceCoords, optional): Coordinate of k-space. Defaults to {}. - allow_chunks (bool): [description]. Defaults to False. - **kwargs: treated as coords. - - Raises: - NotImplementedError: [description] - AnalysisError: [description] - ValueError: [description] - - Returns: - xr.DataArray: Converted ARPES (k-space) data. - """ - coords = {} if coords is None else coords - assert coords is not None - coords.update(kwargs) - - bounds = bounds or {} - arr = arr if isinstance(arr, xr.DataArray) else normalize_to_spectrum(arr) - assert isinstance(arr, xr.DataArray) - if arr.S.angle_unit is AngleUnit.DEG: - arr = arr.S.switched_angle_unit() - logger.debug(f"bounds (covnert_to_kspace): {bounds}") - logger.debug(f"keys in coords (convert_to_kspace): {coords.keys()}") - # Chunking logic - if allow_chunks and ("eV" in arr.dims) and len(arr.eV) > 50: # noqa: PLR2004 - return _chunk_convert( - arr=arr, - bounds=bounds, - resolution=resolution, - calibration=calibration, - coords=coords, - **kwargs, - ) - momentum_incompatibles: list[str] = [ - str(d) - for d in arr.dims - if not is_dimension_convertible_to_momentum(str(d)) and str(d) != "eV" - ] - momentum_compatibles: list[str] = sorted( # Literal["phi", "theta", "beta", "chi", "psi", "hv"] - [str(d) for d in arr.dims if is_dimension_convertible_to_momentum(str(d))], - ) - - if not momentum_compatibles: - return arr # no need to convert, might be XPS or similar - - converted_dims: list[str] = ( - (["eV"] if ("eV" in arr.dims) else []) - + determine_momentum_axes_from_measurement_axes( - momentum_compatibles, - ) # axis_names: list[Literal["phi", "beta", "psi", "theta", "hv"]], - + momentum_incompatibles - ) - - tupled_momentum_compatibles = tuple(momentum_compatibles) - convert_cls: type[ConvertKp | ConvertKxKy | ConvertKpKz] | None = None - if _is_dims_match_coordinate_convert(tupled_momentum_compatibles): - convert_cls = { - ("phi",): ConvertKp, - ("beta", "phi"): ConvertKxKy, - ("phi", "theta"): ConvertKxKy, - ("phi", "psi"): ConvertKxKy, - # ('chi', 'phi',): ConvertKxKy, - ("hv", "phi"): ConvertKpKz, - }.get(tupled_momentum_compatibles) - assert convert_cls is not None, "Cannot select convert class" - - converter: 
CoordinateConverter = convert_cls( - arr=arr, - dim_order=converted_dims, - calibration=calibration, - ) - - converted_coordinates: dict[Hashable, NDArray[np.float64]] = converter.get_coordinates( - resolution=resolution, - bounds=bounds, - ) - if not set(coords.keys()).issubset(converted_coordinates.keys()): - extra = set(coords.keys()).difference(converted_coordinates.keys()) - msg = f"Unexpected passed coordinates: {extra}" - raise ValueError(msg) - converted_coordinates.update(**coords) # type: ignore[misc] - result = convert_coordinates( - arr, - target_coordinates=converted_coordinates, - coordinate_transform={ - "dims": converted_dims, - "transforms": {str(dim): converter.conversion_for(dim) for dim in arr.dims}, - }, - ) - assert isinstance(result, xr.DataArray) - return result - - -class CoordinateTransform(TypedDict, total=True): - dims: list[str] | list[Hashable] # in most case dims should be Literal["kp", "kx", "ky", "kz"]] - transforms: dict[str, Callable[..., NDArray[np.float64]]] - - -def convert_coordinates( - arr: xr.DataArray, - target_coordinates: dict[Hashable, NDArray[np.float64]], - coordinate_transform: CoordinateTransform, - *, - as_dataset: bool = False, -) -> XrTypes: - """Return the band structure data (converted to k-space). - - Args: - arr(xr.DataArray): ARPES data - target_coordinates:(dict[Hashable, NDArray[np.float64]]): coorrdinate for ... - coordinate_transform(dict[str, list[str] | Callable]): coordinat for ... - as_dataset(bool): if True, return the data as the dataSet - - Returns: - XrTypes - """ - assert isinstance(arr, xr.DataArray) - ordered_source_dimensions = arr.dims - - grid_interpolator = grid_interpolator_from_dataarray( - arr.transpose(*ordered_source_dimensions), # TODO(RA): No need? -- perhaps no. - fill_value=np.nan, - ) - - # Skip the Jacobian correction for now - # Convert the raw coordinate axes to a set of gridded points - logger.debug( - f"meshgrid: {[len(target_coordinates[dim]) for dim in coordinate_transform['dims']]}", - ) - meshed_coordinates = [ - meshed_coord.ravel() - for meshed_coord in np.meshgrid( - *[target_coordinates[dim] for dim in coordinate_transform["dims"]], - indexing="ij", - ) - ] - - if "eV" not in arr.dims: - with contextlib.suppress(ValueError): - meshed_coordinates = [arr.S.lookup_offset_coord("eV"), *meshed_coordinates] - old_coord_names = [str(dim) for dim in arr.dims if dim not in target_coordinates] - assert isinstance(coordinate_transform["transforms"], dict) - transforms: dict[str, Callable[..., NDArray[np.float64]]] = coordinate_transform["transforms"] - logger.debug(f"transforms is {transforms}") - old_coordinate_transforms = [ - transforms[str(dim)] for dim in arr.dims if dim not in target_coordinates - ] - logger.debug(f"old_coordinate_transforms: {old_coordinate_transforms}") - - output_shape = [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]] - - def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray[np.float64]: - logger.debug(f"transform function is {transform}") - return np.reshape( - transform(*meshed_coordinates), - output_shape, - order="C", - ) - - old_dimensions = [compute_coordinate(tr) for tr in old_coordinate_transforms] - - ordered_transformations = [transforms[str(dim)] for dim in arr.dims] - transformed_coordinates = [tr(*meshed_coordinates) for tr in ordered_transformations] - - converted_volume = ( - grid_interpolator(np.array(transformed_coordinates).T) - if not isinstance(grid_interpolator, Interpolator) - else 
grid_interpolator(transformed_coordinates) - ) - - # Wrap it all up - def acceptable_coordinate(c: NDArray[np.float64] | xr.DataArray) -> bool: - """Return True if the dim of array is subset of dim of coordinate_transform. - - Currently we do this to filter out coordinates - that are functions of the old angular dimensions, - we could forward convert these, but right now we do not - - Args: - c (xr.DataArray): DataArray for check. - - Returns: bool - Return True if the dim of array is subset of dim of coordinate_transform. - """ - if isinstance(c, xr.DataArray): - return set(c.dims).issubset(coordinate_transform["dims"]) - return True - - target_coordinates = {k: v for k, v in target_coordinates.items() if acceptable_coordinate(v)} - data = xr.DataArray( - np.reshape( - converted_volume, - [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]], - order="C", - ), - target_coordinates, - coordinate_transform["dims"], - attrs=arr.attrs, - ) - if as_dataset: - old_mapped_coords = [ - xr.DataArray( - values, - coords=target_coordinates, - dims=coordinate_transform["dims"], - attrs=arr.attrs, - ) - for values in old_dimensions - ] - variables = {"data": data} - variables.update( - dict( - zip( - old_coord_names, - old_mapped_coords, - strict=True, - ), - ), - ) - return xr.Dataset(variables, attrs=arr.attrs) - - return data - - -def _chunk_convert( - arr: xr.DataArray, - bounds: dict[MOMENTUM, tuple[float, float]] | None = None, - resolution: dict[MOMENTUM, float] | None = None, - calibration: DetectorCalibration | None = None, - coords: KspaceCoords | None = None, - **kwargs: Unpack[KspaceCoords], -) -> xr.DataArray: - DESIRED_CHUNK_SIZE = 1000 * 1000 * 20 - TOO_LARGE_CHUNK_SIZE = 100 - n_chunks: np.int_ = np.prod(arr.shape) // DESIRED_CHUNK_SIZE - if n_chunks == 0: - warnings.warn( - "Data size is sufficiently small, set allow_chunks=False", - stacklevel=2, - ) - n_chunks += 1 - - if n_chunks > TOO_LARGE_CHUNK_SIZE: - warnings.warn( - "Input array is very large. 
Please consider resampling.", - stacklevel=2, - ) - chunk_thickness = np.max(len(arr.eV) // n_chunks, 1) - logger.debug(f"Chunking along energy: {n_chunks}, thickness {chunk_thickness}") - finished = [] - low_idx = 0 - high_idx = chunk_thickness - while low_idx < len(arr.eV): - chunk = arr.isel(eV=slice(low_idx, high_idx)) - if len(chunk.eV) == 1: - chunk = chunk.squeeze(dim="eV") - kchunk = convert_to_kspace( - arr=chunk, - bounds=bounds, - resolution=resolution, - calibration=calibration, - coords=coords, - allow_chunks=False, - **kwargs, - ) - if "eV" not in kchunk.dims: - kchunk = kchunk.expand_dims("eV") - assert isinstance(kchunk, xr.DataArray) - finished.append(kchunk) - low_idx = high_idx - high_idx = min(len(arr.eV), high_idx + chunk_thickness) - return xr.concat(finished, dim="eV") diff --git a/tests/conftest.py b/tests/conftest.py index c4e6296d..9267101b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,8 +9,7 @@ import pytest from lmfit.models import ConstantModel, LinearModel, LorentzianModel, QuadraticModel -import arpes -import arpes.endstations +# import arpes.endstations from arpes.configuration.manager import config_manager from arpes.fits import AffineBroadenedFD from arpes.io import example_data @@ -193,6 +192,9 @@ class Sandbox: @pytest.fixture def sandbox_configuration() -> Iterator[Sandbox]: """Generates a sandboxed configuration of the ARPES data analysis suite.""" + from arpes import plugin_loader # noqa: PLC0415 + from arpes.endstations import registry # noqa: PLC0415 + resources_dir = Path.cwd() / "tests" / "resources" def set_workspace(name: str) -> None: @@ -217,8 +219,8 @@ def load(path: str) -> xr.DataArray | xr.Dataset: with_workspace=set_workspace, load=load, ) - arpes.plugin_loader.load_plugins() + plugin_loader.load_plugins() yield sandbox config_manager.config["WORKSPACE"] = None # arpes.config.CONFIG["WORKSPACE"] = None - arpes.endstations.registry._ENDSTATION_ALIASES = {} + registry._ENDSTATION_ALIASES = {} diff --git a/tests/test_basic_data_loading.py b/tests/test_basic_data_loading.py index 8273f617..c6bcdcf5 100644 --- a/tests/test_basic_data_loading.py +++ b/tests/test_basic_data_loading.py @@ -416,22 +416,24 @@ class TestMetadata: "spectrum_type": "cut", "experimenter": None, "sample": None, - }, + }, "experiment_info": { - "temperature": np.nan, - "temperature_cryotip": np.nan, - "pressure": np.nan, - "polarization": (np.nan, np.nan), - "photon_flux": np.nan, - "photocurrent": np.nan, - "probe": None, - "probe_detail": None, - "analyzer_detail": {"analyzer_name": "Specs PHOIBOS 225", - "parallel_deflectors": True, - "perpendicular_deflectors": True, - "analyzer_type": "hemispherical", - "analyzer_radius": 225}, - }, + "temperature": np.nan, + "temperature_cryotip": np.nan, + "pressure": np.nan, + "polarization": (np.nan, np.nan), + "photon_flux": np.nan, + "photocurrent": np.nan, + "probe": None, + "probe_detail": None, + "analyzer_detail": { + "analyzer_name": "Specs PHOIBOS 225", + "parallel_deflectors": True, + "perpendicular_deflectors": True, + "analyzer_type": "hemispherical", + "analyzer_radius": 225, + }, + }, "analyzer_info": { "lens_mode": "WideAngleMode:400V", "lens_mode_name": None, @@ -446,14 +448,16 @@ class TestMetadata: "work_function": 4.5, }, "beamline_info": { - "hv": pytest.approx(60.0), + "hv": pytest.approx(60.0), "linewidth": np.nan, "photon_polarization": (np.nan, np.nan), - "undulator_info": {"gap": None, - "z": None, - "harmonic": None, - "polarization": None, - "type": None}, + "undulator_info": { + "gap": 
None, + "z": None, + "harmonic": None, + "polarization": None, + "type": None, + }, "repetition_rate": np.nan, "beam_current": np.nan, "entrance_slit": None, @@ -468,10 +472,12 @@ class TestMetadata: "prebinning": {}, "trapezoidal_correction_strategy": None, "dither_settings": None, - "sweep_settings": {"high_energy": None, - "low_energy": None, - "n_sweeps": None, - "step": None}, + "sweep_settings": { + "high_energy": None, + "low_energy": None, + "n_sweeps": None, + "step": None, + }, "frames_per_slice": np.nan, "frame_duration": np.nan, }, @@ -897,7 +903,7 @@ class TestBasicDataLoading: }, }, ), - # Solaris, Phelix beamline + # Solaris, Phelix beamline ( "phelix_load_cut", { diff --git a/tests/test_conversion_core.py b/tests/test_conversion_core.py index 58f9ca7e..c7f2d125 100644 --- a/tests/test_conversion_core.py +++ b/tests/test_conversion_core.py @@ -3,10 +3,8 @@ import xarray as xr from scipy.interpolate import RegularGridInterpolator -from arpes.utilities.conversion.core import ( - convert_to_kspace, - grid_interpolator_from_dataarray, -) +from arpes.utilities.conversion.api import convert_to_kspace +from arpes.utilities.conversion.core import grid_interpolator_from_dataarray # FILE: tests/test_core.py From e08f226fce0c306b657acba68936a841e83c309b Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 16:56:37 +0900 Subject: [PATCH 08/16] =?UTF-8?q?=F0=9F=92=A1=20=20remap=5Fcoords=5Fto=20i?= =?UTF-8?q?s=20removed=20from=20api.rst?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/api.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/source/api.rst b/docs/source/api.rst index 8ab53875..51898f68 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -141,7 +141,7 @@ Small-Angle Approximated and Volumetric Related .. autosummary:: :toctree: generated/ - utilities.conversion.core.convert_to_kspace + utilities.conversion.api.convert_to_kspace analysis.forward_conversion.convert_coordinate_forward analysis.forward_conversion.convert_through_angular_point analysis.forward_conversion.convert_through_angular_pair @@ -163,7 +163,6 @@ Utilities utilities.conversion.fast_interp.Interpolator utilities.conversion.bounds_calculations.full_angles_to_k - utilities.conversion.remap_manipulator.remap_coords_to Conversion Implementations ~~~~~~~~~~~~~~~~~~~~~~~~~~ From b92b11ddfd598e092cc51ee896ecfeb8e6e6448e Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 21:50:04 +0900 Subject: [PATCH 09/16] =?UTF-8?q?test:=20=F0=9F=92=8D=20add=20test=20for?= =?UTF-8?q?=20remove=5Fcolorbars?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/plotting/utils.py | 22 ++++++++++------------ tests/test_plotting_utils.py | 12 +++++++++++- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/arpes/plotting/utils.py b/src/arpes/plotting/utils.py index 44338607..2b730a44 100644 --- a/src/arpes/plotting/utils.py +++ b/src/arpes/plotting/utils.py @@ -752,21 +752,19 @@ def get_colorbars(fig: Figure | None = None) -> list[Colorbar]: def remove_colorbars(fig: Figure | None = None) -> None: - """Removes colorbars from given (or, if no given figure, current) matplotlib figure. + """Removes colorbars from given (or current) matplotlib figure. Args: - fig: The figure to modify, by default uses the current figure (`plt.gcf()`) + fig: The figure to modify. If None, uses current figure. 
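+
+    Example:
+        A minimal sketch (mirrors the unit test added below; assumes the
+        module-level ``plt`` import):
+
+        >>> fig, ax = plt.subplots()
+        >>> cbar = plt.colorbar(ax.imshow([[0, 1], [2, 3]]))
+        >>> remove_colorbars(fig)
+        >>> get_colorbars(fig)
+        []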
""" - # TODO: after colorbar removal, plots should be relaxed/rescaled to occupy space previously - # allocated to colorbars for now, can follow this with plt.tight_layout() - COLORBAR_ASPECT_RATIO = 20 - if fig is not None: - for ax in fig.axes: - aspect_ratio = ax.get_aspect() - if isinstance(aspect_ratio, float) and aspect_ratio >= COLORBAR_ASPECT_RATIO: - ax.remove() - else: - remove_colorbars(plt.gcf()) + fig = plt.gcf() if fig is None else fig + + for cbar in get_colorbars(fig): + if hasattr(cbar, "remove"): + cbar.remove() + # fallback (older versions / edge cases) + elif hasattr(cbar, "ax"): + cbar.ax.remove() def calculate_aspect_ratio(data: xr.DataArray) -> float: diff --git a/tests/test_plotting_utils.py b/tests/test_plotting_utils.py index 47c33f98..758a67bd 100644 --- a/tests/test_plotting_utils.py +++ b/tests/test_plotting_utils.py @@ -5,7 +5,17 @@ from matplotlib.cm import ScalarMappable from matplotlib.colorbar import Colorbar -from arpes.plotting.utils import get_colorbars +from arpes.plotting.utils import get_colorbars, remove_colorbars + + +def test_remove_colorbars(): + fig, ax = plt.subplots() + im = ax.imshow([[0, 1], [2, 3]]) + plt.colorbar(im) + + assert len(get_colorbars(fig)) == 1 + remove_colorbars(fig) + assert len(get_colorbars(fig)) == 0 def test_get_colorbars_all_paths(): From af33646b90fe77bd42c65f56aa6318e1d7de66d1 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Fri, 26 Dec 2025 09:01:33 +0900 Subject: [PATCH 10/16] =?UTF-8?q?refactor:=20=F0=9F=92=A1=20refactoring=20?= =?UTF-8?q?=5Fbin=20in=20analysis/general.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/analysis/general.py | 48 ++++++++++--------- .../conversion/bounds_calculations.py | 7 +-- tests/test_analysis_general.py | 22 ++++++++- 3 files changed, 48 insertions(+), 29 deletions(-) diff --git a/src/arpes/analysis/general.py b/src/arpes/analysis/general.py index 24941a98..662358d2 100644 --- a/src/arpes/analysis/general.py +++ b/src/arpes/analysis/general.py @@ -138,7 +138,7 @@ def symmetrize_axis( Args: data: input data - axis_name: name of axis to be symmbetrized. + axis_name: name of axis to be symmetrized. flip_axes (list[str]): lis of axis name to be flipped flipping. Returns: @@ -242,32 +242,36 @@ def _bin( bins: int, method: ReduceMethod, ) -> DataType: + """Bin data along a specified axis and replace bin coordinates with bin centers. + + Args: + data: xarray DataArray or Dataset to bin. + bin_axis: Name of the coordinate along which to bin. + bins: Number of bins. + method: Reduction method, either "sum" or "mean". + + Returns: + Binned xarray object with updated coordinates. 
+ """ + original_left, original_right = ( data.coords[bin_axis].min().item(), data.coords[bin_axis].max().item(), ) original_region = original_right - original_left - if method == "sum": - data = ( - data.groupby_bins(bin_axis, bins, precision=10) - .sum() - .rename({bin_axis + "_bins": bin_axis}) - ) - elif method == "mean": - data = ( - data.groupby_bins(bin_axis, bins, precision=10) - .mean() - .rename({bin_axis + "_bins": bin_axis}) - ) - else: - msg = "method must be sum or mean" + if method not in ("sum", "mean"): + msg = "method must be 'sum' or 'mean'" raise TypeError(msg) - left = data.coords[bin_axis].values[0].left - right = data.coords[bin_axis].values[0].right - left = left + original_region * 0.001 - medium_values = [ - (left + right) / 2, - *[(b.left + b.right) / 2 for b in data.coords[bin_axis].values[1:]], + grouped = data.groupby_bins(bin_axis, bins) + data = getattr(grouped, method)().rename({bin_axis + "_bins": bin_axis}) + + bin_edges = data.coords[bin_axis].values + + first_left = np.nextafter(bin_edges[0].left, bin_edges[0].right) + bin_centers = [(first_left + bin_edges[0].right) / 2] + [ + (b.left + b.right) / 2 for b in bin_edges[1:] ] - data.coords[bin_axis] = np.array(medium_values) + + data.coords[bin_axis] = np.array(bin_centers) + return data diff --git a/src/arpes/utilities/conversion/bounds_calculations.py b/src/arpes/utilities/conversion/bounds_calculations.py index 3cab9430..b7c3487e 100644 --- a/src/arpes/utilities/conversion/bounds_calculations.py +++ b/src/arpes/utilities/conversion/bounds_calculations.py @@ -17,12 +17,7 @@ import xarray as xr from numpy.typing import NDArray -__all__ = ( - "calculate_kp_bounds", - "calculate_kp_kz_bounds", - "calculate_kx_ky_bounds", - "full_angles_to_k", -) +__all__ = ("full_angles_to_k",) def full_angles_to_k( # noqa: PLR0913 diff --git a/tests/test_analysis_general.py b/tests/test_analysis_general.py index 47586073..2b61b8d3 100644 --- a/tests/test_analysis_general.py +++ b/tests/test_analysis_general.py @@ -4,6 +4,7 @@ import pytest import xarray as xr +import arpes.xarray_extensions from arpes.analysis.general import ( _bin, condense, @@ -88,7 +89,11 @@ def test_symmetrize_axis(): energy = np.linspace(-1, 1, 201) data = np.random.default_rng().random(201) da = xr.DataArray(data, coords=[("eV", energy)]) - result = symmetrize_axis(da, axis_name="eV", flip_axes=False) + result = symmetrize_axis( + da, + axis_name="eV", + flip_axes=False, + ) assert isinstance(result, xr.DataArray) assert result.shape == da.shape @@ -107,6 +112,21 @@ def test_rebin(): assert result.sizes["eV"] == 50 +def test_rebin_with_method_sum(): + data = xr.DataArray(np.random.default_rng().random((10, 100)), dims=["x", "eV"]) + shape = {"eV": 50} + result = rebin(data, shape, bin_width=None, method="sum") + assert isinstance(result, xr.DataArray) + assert result.sizes["eV"] == 50 + + +def test_rebin_with_wrong_method(): + data = xr.DataArray(np.random.default_rng().random((10, 100)), dims=["x", "eV"]) + shape = {"eV": 50} + with pytest.raises(TypeError): + rebin(data, shape, bin_width=None, method="invalid_method") + + def test__bin(): data = xr.DataArray(np.arange(100), dims=["x"]) bins = np.linspace(0, 100, 11) From 52e9bac68b70b599ebf0e7251e813b9368ec38bc Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Fri, 26 Dec 2025 09:06:44 +0900 Subject: [PATCH 11/16] =?UTF-8?q?refactor:=20=F0=9F=92=A1=20refactoring=20?= =?UTF-8?q?analysis/general.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
src/arpes/analysis/general.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/arpes/analysis/general.py b/src/arpes/analysis/general.py index 662358d2..a7372e67 100644 --- a/src/arpes/analysis/general.py +++ b/src/arpes/analysis/general.py @@ -253,12 +253,6 @@ def _bin( Returns: Binned xarray object with updated coordinates. """ - - original_left, original_right = ( - data.coords[bin_axis].min().item(), - data.coords[bin_axis].max().item(), - ) - original_region = original_right - original_left if method not in ("sum", "mean"): msg = "method must be 'sum' or 'mean'" raise TypeError(msg) From 1d751d67fab5c21e7917373303575203091b2fc3 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Fri, 26 Dec 2025 11:05:08 +0900 Subject: [PATCH 12/16] =?UTF-8?q?=E2=9C=85=20Follow=20=5Fbin=20algorithm?= =?UTF-8?q?=20improvement?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/test_analysis_xps.py | 4 ++-- tests/test_stack_plot.py | 18 +++++++++--------- tests/test_utils_region.py | 4 ++-- tests/test_xps.py | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/test_analysis_xps.py b/tests/test_analysis_xps.py index 6a1d7af5..34150767 100644 --- a/tests/test_analysis_xps.py +++ b/tests/test_analysis_xps.py @@ -1,9 +1,9 @@ """Unit test for analysis.xps.""" - import numpy as np import xarray as xr +import arpes.xarray_extensions # noqa: F401 from arpes.analysis.xps import approximate_core_levels @@ -11,4 +11,4 @@ def test_approximate_core_levels(xps_map: xr.Dataset) -> None: """Test the core level approximation function.""" xps_spectrum = xps_map.spectrum.sum(["x", "y"], keep_attrs=True) approx_levels = approximate_core_levels(xps_spectrum, promenance=5) - np.testing.assert_almost_equal(approx_levels[0], -34.5066501491) + np.testing.assert_almost_equal(approx_levels[0], -34.5065) diff --git a/tests/test_stack_plot.py b/tests/test_stack_plot.py index 245ca27d..73e8ad8e 100644 --- a/tests/test_stack_plot.py +++ b/tests/test_stack_plot.py @@ -148,19 +148,19 @@ def test_stack_dispersion_plot_2(self, dataarray_cut2: xr.DataArray) -> None: paths[0].get_paths()[0].vertices[:3], np.array( [ - [9.0, np.rad2deg(0.19602282)], - [9.0, np.rad2deg(0.19632079)], - [9.002, np.rad2deg(0.19634603)], - ] + [9.0, 11.231], + [9.0, 11.248082], + [9.002, 11.249529], + ], ), ) np.testing.assert_allclose( actual=paths[-1].get_paths()[0].vertices[:3], desired=np.array( [ - [9.0, np.rad2deg(-0.19602282)], - [9.0, np.rad2deg(-0.19578132)], - [9.002, np.rad2deg(-0.19573485)], + [9.0, -11.2435], + [9.0, -11.229656], + [9.002, -11.226992], ], ), ) @@ -168,8 +168,8 @@ def test_stack_dispersion_plot_2(self, dataarray_cut2: xr.DataArray) -> None: assert xmin == 8.95 assert xmax == 10.05 ymin, ymax = ax.get_ylim() - np.testing.assert_allclose(ymin, -12.363908917090033) - np.testing.assert_allclose(ymax, 12.55392725889067) + np.testing.assert_allclose(ymin, -12.376731) + np.testing.assert_allclose(ymax, 12.55435) class TestFlatStackPlot: diff --git a/tests/test_utils_region.py b/tests/test_utils_region.py index f3a2310f..4b5c1101 100644 --- a/tests/test_utils_region.py +++ b/tests/test_utils_region.py @@ -61,8 +61,8 @@ def test_meso_effective_selector(dataarray_cut: xr.DataArray) -> None: def test_find_spectrum_angular_edges_full(dataarray_cut: xr.DataArray) -> None: """Test for find_spectrum_angular_edges_full.""" desired = ( - np.array([0.26964973, 0.26092309, 0.30281099, 0.28012171, 0.26441375, 0.25394177]), - np.array([0.59951696, 0.60824361, 0.60998894, 
0.60824361, 0.54890241, 0.56810104]), + np.array([0.269124, 0.260397, 0.302285, 0.279596, 0.263888, 0.253416]), + np.array([0.598991, 0.607718, 0.609463, 0.607718, 0.548377, 0.567575]), ) results = find_spectrum_angular_edges_full(dataarray_cut) np.testing.assert_allclose( diff --git a/tests/test_xps.py b/tests/test_xps.py index f06df9ae..e67abd41 100644 --- a/tests/test_xps.py +++ b/tests/test_xps.py @@ -18,5 +18,5 @@ def test_approximate_core_levels(self, dataarray_cut2: xr.DataArray) -> None: energies = approximate_core_levels(dataarray_cut2.S.sum_other(["eV"])) np.testing.assert_allclose( energies, - [9.36826347305, 9.58383233535, 9.89520958085], + [9.368, 9.584, 9.895], ) From 953c797cca9150e459d312767bc47e4843c8400c Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Thu, 25 Dec 2025 11:23:45 +0900 Subject: [PATCH 13/16] =?UTF-8?q?=F0=9F=92=A5=20=20convert=5Fto=5Fkspace?= =?UTF-8?q?=20is=20moved=20to=20api.py=20from=20core.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/api.rst | 3 +- src/arpes/analysis/forward_conversion.py | 2 +- src/arpes/analysis/path.py | 2 +- src/arpes/correction/trapezoid.py | 2 +- .../coordinates.py} | 0 src/arpes/plotting/dispersion.py | 2 +- src/arpes/utilities/conversion/__init__.py | 3 +- src/arpes/utilities/conversion/api.py | 275 ++++++++++++++ src/arpes/utilities/conversion/coordinates.py | 254 +++++++++++++ src/arpes/utilities/conversion/core.py | 350 +----------------- tests/conftest.py | 10 +- tests/test_basic_data_loading.py | 58 +-- tests/test_conversion_core.py | 6 +- tests/test_correction_intensity_map.py | 34 +- 14 files changed, 595 insertions(+), 406 deletions(-) rename src/arpes/{utilities/conversion/remap_manipulator.py => plotting/coordinates.py} (100%) create mode 100644 src/arpes/utilities/conversion/api.py create mode 100644 src/arpes/utilities/conversion/coordinates.py diff --git a/docs/source/api.rst b/docs/source/api.rst index 8ab53875..51898f68 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -141,7 +141,7 @@ Small-Angle Approximated and Volumetric Related .. 
autosummary:: :toctree: generated/ - utilities.conversion.core.convert_to_kspace + utilities.conversion.api.convert_to_kspace analysis.forward_conversion.convert_coordinate_forward analysis.forward_conversion.convert_through_angular_point analysis.forward_conversion.convert_through_angular_pair @@ -163,7 +163,6 @@ Utilities utilities.conversion.fast_interp.Interpolator utilities.conversion.bounds_calculations.full_angles_to_k - utilities.conversion.remap_manipulator.remap_coords_to Conversion Implementations ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/arpes/analysis/forward_conversion.py b/src/arpes/analysis/forward_conversion.py index 59aa0fa0..4bb3833e 100644 --- a/src/arpes/analysis/forward_conversion.py +++ b/src/arpes/analysis/forward_conversion.py @@ -25,13 +25,13 @@ from arpes.debug import setup_logger from arpes.provenance import update_provenance from arpes.utilities import normalize_to_spectrum +from arpes.utilities.conversion.api import convert_to_kspace from arpes.utilities.conversion.bounds_calculations import ( euler_to_kx, euler_to_ky, euler_to_kz, full_angles_to_k, ) -from arpes.utilities.conversion.core import convert_to_kspace from arpes.xarray_extensions.accessor.spectrum_type import EnergyNotation, SpectrumType if TYPE_CHECKING: diff --git a/src/arpes/analysis/path.py b/src/arpes/analysis/path.py index 34725780..d5b55341 100644 --- a/src/arpes/analysis/path.py +++ b/src/arpes/analysis/path.py @@ -21,7 +21,7 @@ from arpes.debug import setup_logger from arpes.provenance import Provenance, provenance -from arpes.utilities.conversion.core import convert_coordinates +from arpes.utilities.conversion.coordinates import convert_coordinates from arpes.utilities.conversion.grids import determine_axis_type LOGLEVELS = (DEBUG, INFO) diff --git a/src/arpes/correction/trapezoid.py b/src/arpes/correction/trapezoid.py index 04a177b7..619c4a05 100644 --- a/src/arpes/correction/trapezoid.py +++ b/src/arpes/correction/trapezoid.py @@ -26,7 +26,7 @@ from arpes.debug import setup_logger from arpes.utilities.conversion.base import CoordinateConverter -from arpes.utilities.conversion.core import convert_coordinates +from arpes.utilities.conversion.coordinates import convert_coordinates if TYPE_CHECKING: from collections.abc import Callable, Hashable diff --git a/src/arpes/utilities/conversion/remap_manipulator.py b/src/arpes/plotting/coordinates.py similarity index 100% rename from src/arpes/utilities/conversion/remap_manipulator.py rename to src/arpes/plotting/coordinates.py diff --git a/src/arpes/plotting/dispersion.py b/src/arpes/plotting/dispersion.py index 86ab75a2..f2e027ec 100644 --- a/src/arpes/plotting/dispersion.py +++ b/src/arpes/plotting/dispersion.py @@ -14,10 +14,10 @@ from arpes.analysis.path import slice_along_path from arpes.io import load_data +from arpes.plotting.coordinates import remap_coords_to from arpes.preparation import normalize_dim from arpes.provenance import save_plot_provenance from arpes.utilities import bz -from arpes.utilities.conversion import remap_coords_to from .utils import label_for_colorbar, label_for_dim, label_for_symmetry_point, path_for_plot diff --git a/src/arpes/utilities/conversion/__init__.py b/src/arpes/utilities/conversion/__init__.py index c008b203..dba12b67 100644 --- a/src/arpes/utilities/conversion/__init__.py +++ b/src/arpes/utilities/conversion/__init__.py @@ -2,6 +2,5 @@ from __future__ import annotations +from .api import convert_to_kspace from .calibration import DetectorCalibration -from .core import convert_to_kspace -from 
.remap_manipulator import remap_coords_to
diff --git a/src/arpes/utilities/conversion/api.py b/src/arpes/utilities/conversion/api.py
new file mode 100644
index 00000000..3ffa58ab
--- /dev/null
+++ b/src/arpes/utilities/conversion/api.py
@@ -0,0 +1,275 @@
+"""Public API for momentum-space (k-space) conversion of ARPES data.
+
+This module defines the user-facing entry points for converting ARPES
+data from angle/energy space into momentum space. The functions provided
+here are stable, documented, and intended to be imported and used directly
+by analysis scripts and notebooks.
+
+Design principles:
+    - This module exposes *high-level orchestration functions* only.
+    - Implementation details (coordinate transforms, interpolation,
+      grid construction) are delegated to lower-level modules.
+    - Functions here operate on `xarray.DataArray` objects and return
+      new objects without modifying inputs in place.
+
+Scope:
+    - Backward (interpolating) k-space conversion via `convert_to_kspace`
+    - Chunk-aware conversion for large energy stacks
+    - Provenance tracking for reproducibility
+
+Non-goals:
+    - This module does not implement low-level coordinate transforms.
+    - This module does not define interpolation kernels or numerical solvers.
+    - Forward (coordinate-only) conversion routines live elsewhere.
+
+Typical usage:
+    >>> from arpes.utilities.conversion.api import convert_to_kspace
+    >>> kdata = convert_to_kspace(data, kx=..., ky=...)
+
+Module structure:
+    - convert_to_kspace:
+        Primary public entry point for k-space conversion.
+    - _chunk_convert:
+        Internal helper for chunk-wise conversion (not part of the public API).
+
+See Also:
+    - arpes.utilities.conversion.coordinates:
+        Low-level coordinate transformation utilities.
+    - arpes.utilities.conversion.core:
+        Internal conversion machinery and interpolation logic.
+"""
+
+from __future__ import annotations
+
+import warnings
+from logging import DEBUG, INFO
+from typing import TYPE_CHECKING, Unpack
+
+import numpy as np
+import xarray as xr
+
+from arpes.debug import setup_logger
+from arpes.provenance import update_provenance
+from arpes.utilities import normalize_to_spectrum
+from arpes.xarray_extensions.accessor.spectrum_type import AngleUnit
+
+from .coordinates import convert_coordinates
+from .core import _is_dims_match_coordinate_convert
+from .grids import (
+    determine_momentum_axes_from_measurement_axes,
+    is_dimension_convertible_to_momentum,
+)
+from .kx_ky_conversion import ConvertKp, ConvertKxKy
+from .kz_conversion import ConvertKpKz
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+
+    from numpy.typing import NDArray
+
+    from arpes._typing.attrs_property import KspaceCoords
+    from arpes._typing.base import MOMENTUM
+    from arpes.utilities.conversion.base import CoordinateConverter
+    from arpes.utilities.conversion.calibration import DetectorCalibration
+
+
+LOGLEVELS = (DEBUG, INFO)
+LOGLEVEL = LOGLEVELS[1]
+logger = setup_logger(__name__, LOGLEVEL)
+
+
+@update_provenance("Automatically k-space converted")
+def convert_to_kspace(  # noqa: PLR0913
+    arr: xr.DataArray,
+    *,
+    bounds: dict[MOMENTUM, tuple[float, float]] | None = None,
+    resolution: dict[MOMENTUM, float] | None = None,
+    calibration: DetectorCalibration | None = None,
+    coords: KspaceCoords | None = None,
+    allow_chunks: bool = False,
+    **kwargs: Unpack[KspaceCoords],
+) -> xr.DataArray:
+    """Converts the volumetric data to momentum space ("backwards"). Typically what you want.
+
+    Works in general by regridding the data into the new coordinate space and then
+    interpolating back into the original data.
+
+    For forward conversion, see sibling methods. Forward conversion works by
+    converting the coordinates, rather than by interpolating the data. As a result, the data will be
+    totally unchanged by the conversion (if we do not apply a Jacobian correction), but the
+    coordinates will no longer have equal spacing.
+
+    This is only really useful for zero and one dimensional data because for two dimensional data,
+    the coordinates must become two dimensional in order to fully specify every data point
+    (this is true in generality, in 3D the coordinates must become 3D as well).
+
+    The only exception to this is if the extra axes do not need to be k-space converted, as is the
+    case where one of the dimensions is `cycle` or `delay`, for instance.
+
+    You can request a particular resolution for the new data with the `resolution=` parameter,
+    or a specific set of bounds with the `bounds=` parameter.
+
+    Examples:
+        Convert a 2D cut with automatically inferred range and resolution.
+
+        >>> convert_to_kspace(arpes.io.load_example_data())  # doctest: +SKIP
+        xr.DataArray(...)
+
+        Convert a 3D map with a specified momentum window.
+
+        >>> convert_to_kspace(  # doctest: +SKIP
+                fermi_surface_map,
+                kx=np.linspace(-1, 1, 200),
+                ky=np.linspace(-1, 1, 350),
+            )
+        xr.DataArray(...)
+
+    Args:
+        arr (xr.DataArray): ARPES data.
+        bounds (dict[MOMENTUM, tuple[float, float]], optional):
+            The key is the axis name. The value is the bounds. Defaults to {}.
+            If this argument is not set, set `coords` instead.
+        resolution (dict[MOMENTUM, float], optional): Resolution for each momentum axis.
+        calibration (DetectorCalibration, optional): DetectorCalibration object. Defaults to None.
+        coords (KspaceCoords, optional): Coordinate of k-space. Defaults to {}.
+        allow_chunks (bool): If True, large data are converted chunk-by-chunk along the
+            energy axis. Defaults to False.
+        **kwargs: treated as coords.
+
+    Raises:
+        NotImplementedError: [description]
+        AnalysisError: [description]
+        ValueError: [description]
+
+    Returns:
+        xr.DataArray: Converted ARPES (k-space) data.
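+
+    Example:
+        Requesting an explicit window and resolution (a sketch; ``cut`` stands
+        for a hypothetical 2D angle-resolved dataset):
+
+        >>> convert_to_kspace(  # doctest: +SKIP
+                cut,
+                bounds={"kp": (-1.0, 1.0)},
+                resolution={"kp": 0.005},
+            )
+        xr.DataArray(...)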
+    """
+    coords = {} if coords is None else coords
+    assert coords is not None
+    coords.update(kwargs)
+
+    bounds = bounds or {}
+    arr = arr if isinstance(arr, xr.DataArray) else normalize_to_spectrum(arr)
+    assert isinstance(arr, xr.DataArray)
+    if arr.S.angle_unit is AngleUnit.DEG:
+        arr = arr.S.switched_angle_unit()
+    logger.debug(f"bounds (convert_to_kspace): {bounds}")
+    logger.debug(f"keys in coords (convert_to_kspace): {coords.keys()}")
+    # Chunking logic
+    if allow_chunks and ("eV" in arr.dims) and len(arr.eV) > 50:  # noqa: PLR2004
+        return _chunk_convert(
+            arr=arr,
+            bounds=bounds,
+            resolution=resolution,
+            calibration=calibration,
+            coords=coords,
+            **kwargs,
+        )
+    momentum_incompatibles: list[str] = [
+        str(d)
+        for d in arr.dims
+        if not is_dimension_convertible_to_momentum(str(d)) and str(d) != "eV"
+    ]
+    momentum_compatibles: list[str] = sorted(  # Literal["phi", "theta", "beta", "chi", "psi", "hv"]
+        [str(d) for d in arr.dims if is_dimension_convertible_to_momentum(str(d))],
+    )
+
+    if not momentum_compatibles:
+        return arr  # no need to convert, might be XPS or similar
+
+    converted_dims: list[str] = (
+        (["eV"] if ("eV" in arr.dims) else [])
+        + determine_momentum_axes_from_measurement_axes(
+            momentum_compatibles,
+        )  # axis_names: list[Literal["phi", "beta", "psi", "theta", "hv"]],
+        + momentum_incompatibles
+    )
+
+    tupled_momentum_compatibles = tuple(momentum_compatibles)
+    convert_cls: type[ConvertKp | ConvertKxKy | ConvertKpKz] | None = None
+    if _is_dims_match_coordinate_convert(tupled_momentum_compatibles):
+        convert_cls = {
+            ("phi",): ConvertKp,
+            ("beta", "phi"): ConvertKxKy,
+            ("phi", "theta"): ConvertKxKy,
+            ("phi", "psi"): ConvertKxKy,
+            # ('chi', 'phi',): ConvertKxKy,
+            ("hv", "phi"): ConvertKpKz,
+        }.get(tupled_momentum_compatibles)
+    assert convert_cls is not None, "Cannot select convert class"
+
+    converter: CoordinateConverter = convert_cls(
+        arr=arr,
+        dim_order=converted_dims,
+        calibration=calibration,
+    )
+
+    converted_coordinates: dict[Hashable, NDArray[np.float64]] = converter.get_coordinates(
+        resolution=resolution,
+        bounds=bounds,
+    )
+    if not set(coords.keys()).issubset(converted_coordinates.keys()):
+        extra = set(coords.keys()).difference(converted_coordinates.keys())
+        msg = f"Unexpected passed coordinates: {extra}"
+        raise ValueError(msg)
+    converted_coordinates.update(**coords)  # type: ignore[misc]
+    result = convert_coordinates(
+        arr,
+        target_coordinates=converted_coordinates,
+        coordinate_transform={
+            "dims": converted_dims,
+            "transforms": {str(dim): converter.conversion_for(dim) for dim in arr.dims},
+        },
+    )
+    assert isinstance(result, xr.DataArray)
+    return result
+
+
+def _chunk_convert(
+    arr: xr.DataArray,
+    bounds: dict[MOMENTUM, tuple[float, float]] | None = None,
+    resolution: dict[MOMENTUM, float] | None = None,
+    calibration: DetectorCalibration | None = None,
+    coords: KspaceCoords | None = None,
+    **kwargs: Unpack[KspaceCoords],
+) -> xr.DataArray:
+    """Convert a large array to k-space chunk-by-chunk along the energy axis."""
+    DESIRED_CHUNK_SIZE = 1000 * 1000 * 20
+    TOO_LARGE_CHUNK_SIZE = 100
+    n_chunks: np.int_ = np.prod(arr.shape) // DESIRED_CHUNK_SIZE
+    if n_chunks == 0:
+        warnings.warn(
+            "Data size is sufficiently small, set allow_chunks=False",
+            stacklevel=2,
+        )
+        n_chunks += 1
+
+    if n_chunks > TOO_LARGE_CHUNK_SIZE:
+        warnings.warn(
+            "Input array is very large. Please consider resampling.",
+            stacklevel=2,
+        )
+    # Use the builtin max: np.max(x, 1) would interpret 1 as an axis argument.
+    chunk_thickness = max(len(arr.eV) // n_chunks, 1)
+    logger.debug(f"Chunking along energy: {n_chunks}, thickness {chunk_thickness}")
+    finished = []
+    low_idx = 0
+    high_idx = chunk_thickness
+    while low_idx < len(arr.eV):
+        chunk = arr.isel(eV=slice(low_idx, high_idx))
+        if len(chunk.eV) == 1:
+            chunk = chunk.squeeze(dim="eV")
+        kchunk = convert_to_kspace(
+            arr=chunk,
+            bounds=bounds,
+            resolution=resolution,
+            calibration=calibration,
+            coords=coords,
+            allow_chunks=False,
+            **kwargs,
+        )
+        if "eV" not in kchunk.dims:
+            kchunk = kchunk.expand_dims("eV")
+        assert isinstance(kchunk, xr.DataArray)
+        finished.append(kchunk)
+        low_idx = high_idx
+        high_idx = min(len(arr.eV), high_idx + chunk_thickness)
+    return xr.concat(finished, dim="eV")
diff --git a/src/arpes/utilities/conversion/coordinates.py b/src/arpes/utilities/conversion/coordinates.py
new file mode 100644
index 00000000..612b1267
--- /dev/null
+++ b/src/arpes/utilities/conversion/coordinates.py
@@ -0,0 +1,254 @@
+"""Low-level coordinate transformation utilities.
+
+This module contains internal, low-level routines used to convert ARPES data
+between coordinate systems (e.g. angle space to momentum space) using
+volumetric interpolation.
+
+⚠️ Design notes
+----------------
+The functions defined here are **NOT part of the public API**.
+
+They intentionally expose implementation details such as:
+- explicit target coordinate grids,
+- transformation dictionaries,
+- interpolation order and reshaping rules,
+- assumptions about xarray dimensions and accessors.
+
+As a result:
+- This module MUST NOT be re-exported from ``arpes.utilities.conversion.__init__``.
+- Users should NOT import from this module directly.
+- Backward compatibility is NOT guaranteed.
+
+Public-facing coordinate conversion functionality is provided instead via:
+- higher-level analysis routines (e.g. ``convert_to_kspace``),
+- xarray accessors (``DataArray.S``),
+- or dedicated user APIs in the ``analysis`` layer.
+
+🧱 Architectural role
+---------------------
+This module belongs to the **internal utility layer** and is designed to be:
+
+- shared by plotting and analysis code,
+- flexible and powerful,
+- free to evolve without API constraints.
+
+It should depend only on:
+- NumPy,
+- xarray,
+- and other internal utilities,
+
+and must not introduce dependencies on higher-level modules such as
+``analysis``, ``plotting``, or xarray accessors beyond what is strictly required.
+
+If you are looking for a user-facing function, this is probably not the module
+you want to import.
+"""
+
+from __future__ import annotations
+
+import contextlib
+from logging import DEBUG, INFO
+from typing import TYPE_CHECKING, TypedDict
+
+import numpy as np
+import xarray as xr
+
+from arpes.debug import setup_logger
+
+from .core import grid_interpolator_from_dataarray
+from .fast_interp import Interpolator
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Hashable
+
+    from numpy.typing import NDArray
+
+    from arpes._typing.base import XrTypes
+
+
+LOGLEVELS = (DEBUG, INFO)
+LOGLEVEL = LOGLEVELS[1]
+logger = setup_logger(__name__, LOGLEVEL)
+
+
+class CoordinateTransform(TypedDict, total=True):
+    """Internal specification of a coordinate transformation.
+
+    This TypedDict defines the minimal contract required by the low-level
+    volumetric coordinate conversion engine. It describes:
+
+    - the ordered set of target dimensions in the output space, and
+    - a mapping from each source/target dimension name to a callable
+      that computes the corresponding coordinate values.
+
+    ⚠️ This is an internal data structure.
+    ------------------------------------
+    It is **not part of the public API** and may change without notice.
+    Users should not construct or rely on this object directly.
+
+    The structure is intentionally lightweight and flexible to support
+    different coordinate systems (e.g. angle space, momentum space) without
+    imposing a rigid class hierarchy.
+
+    Fields
+    ------
+    dims : list[str] or list[Hashable]
+        Ordered names of the target coordinate dimensions.
+        The order determines the shape and ordering of the output array.
+
+        In most practical ARPES use cases, this will be something like::
+
+            ["kp"]              # cut data
+            ["kx", "ky"]        # 2D momentum maps
+            ["kx", "ky", "kz"]  # 3D momentum volumes
+
+        but no specific coordinate system is assumed at this level.
+
+    transforms : dict[str, Callable[..., NDArray[np.float64]]]
+        Mapping from coordinate names to transformation functions.
+
+        Each callable must accept a sequence of meshed coordinate arrays
+        (as produced by ``numpy.meshgrid``) and return a NumPy array of
+        transformed coordinate values.
+
+        The keys of this dictionary are expected to include:
+        - all original dimensions of the input DataArray, and
+        - all target dimensions listed in ``dims``.
+
+    Notes:
+        - No validation of physical correctness is performed here.
+        - The numerical meaning of the transforms is defined entirely by
+          the calling code.
+        - This structure is designed to support volumetric interpolation
+          workflows and should remain free of higher-level concepts such as
+          spectrum type, plotting logic, or experiment metadata.
+    """

+    dims: list[str] | list[Hashable]  # in most cases, Literal["kp", "kx", "ky", "kz"]
+    transforms: dict[str, Callable[..., NDArray[np.float64]]]
+
+
+def convert_coordinates(
+    arr: xr.DataArray,
+    target_coordinates: dict[Hashable, NDArray[np.float64]],
+    coordinate_transform: CoordinateTransform,
+    *,
+    as_dataset: bool = False,
+) -> XrTypes:
+    """Return the band structure data (converted to k-space).
+
+    Args:
+        arr (xr.DataArray): ARPES data.
+        target_coordinates (dict[Hashable, NDArray[np.float64]]): coordinates of the
+            target (k-space) grid, keyed by dimension name.
+        coordinate_transform (CoordinateTransform): target dims and the transform
+            functions mapping the target grid back to the source coordinates.
+        as_dataset (bool): if True, return the data as an xr.Dataset.
+
+    Returns:
+        XrTypes
+    """
+    assert isinstance(arr, xr.DataArray)
+    ordered_source_dimensions = arr.dims
+
+    grid_interpolator = grid_interpolator_from_dataarray(
+        arr.transpose(*ordered_source_dimensions),  # TODO(RA): No need? -- perhaps no.
+        # NaN marks target points that fall outside the measured source range.
+        fill_value=np.nan,
+    )
+
+    # Skip the Jacobian correction for now
+    # Convert the raw coordinate axes to a set of gridded points
+    logger.debug(
+        f"meshgrid: {[len(target_coordinates[dim]) for dim in coordinate_transform['dims']]}",
+    )
+    meshed_coordinates = [
+        meshed_coord.ravel()
+        for meshed_coord in np.meshgrid(
+            *[target_coordinates[dim] for dim in coordinate_transform["dims"]],
+            indexing="ij",
+        )
+    ]
+
+    if "eV" not in arr.dims:
+        with contextlib.suppress(ValueError):
+            meshed_coordinates = [arr.S.lookup_offset_coord("eV"), *meshed_coordinates]
+    old_coord_names = [str(dim) for dim in arr.dims if dim not in target_coordinates]
+    assert isinstance(coordinate_transform["transforms"], dict)
+    transforms: dict[str, Callable[..., NDArray[np.float64]]] = coordinate_transform["transforms"]
+    logger.debug(f"transforms is {transforms}")
+    old_coordinate_transforms = [
+        transforms[str(dim)] for dim in arr.dims if dim not in target_coordinates
+    ]
+    logger.debug(f"old_coordinate_transforms: {old_coordinate_transforms}")
+
+    output_shape = [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]]
+
+    def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray[np.float64]:
+        logger.debug(f"transform function is {transform}")
+        return np.reshape(
+            transform(*meshed_coordinates),
+            output_shape,
+            order="C",
+        )
+
+    old_dimensions = [compute_coordinate(tr) for tr in old_coordinate_transforms]
+
+    ordered_transformations = [transforms[str(dim)] for dim in arr.dims]
+    transformed_coordinates = [tr(*meshed_coordinates) for tr in ordered_transformations]
+
+    converted_volume = (
+        grid_interpolator(np.array(transformed_coordinates).T)
+        if not isinstance(grid_interpolator, Interpolator)
+        else grid_interpolator(transformed_coordinates)
+    )
+
+    # Wrap it all up
+    def acceptable_coordinate(c: NDArray[np.float64] | xr.DataArray) -> bool:
+        """Return True if the dims of the array are a subset of the target dims.
+
+        Currently we do this to filter out coordinates that are functions of the
+        old angular dimensions; we could forward-convert these, but right now we
+        do not.
+
+        Args:
+            c (xr.DataArray): DataArray to check.
+
+        Returns: bool
+            True if the dims of the array are a subset of ``coordinate_transform["dims"]``.
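+
+        Example (illustrative; plain arrays always pass, DataArrays only if
+        their dims are a subset of the target dims):
+            >>> acceptable_coordinate(np.asarray([0.1, 0.2]))
+            True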
+ """ + if isinstance(c, xr.DataArray): + return set(c.dims).issubset(coordinate_transform["dims"]) + return True + + target_coordinates = {k: v for k, v in target_coordinates.items() if acceptable_coordinate(v)} + data = xr.DataArray( + np.reshape( + converted_volume, + [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]], + order="C", + ), + target_coordinates, + coordinate_transform["dims"], + attrs=arr.attrs, + ) + if as_dataset: + old_mapped_coords = [ + xr.DataArray( + values, + coords=target_coordinates, + dims=coordinate_transform["dims"], + attrs=arr.attrs, + ) + for values in old_dimensions + ] + variables = {"data": data} + variables.update( + dict( + zip( + old_coord_names, + old_mapped_coords, + strict=True, + ), + ), + ) + return xr.Dataset(variables, attrs=arr.attrs) + + return data diff --git a/src/arpes/utilities/conversion/core.py b/src/arpes/utilities/conversion/core.py index ac36cf19..416eecb9 100644 --- a/src/arpes/utilities/conversion/core.py +++ b/src/arpes/utilities/conversion/core.py @@ -22,40 +22,20 @@ from __future__ import annotations -import contextlib -import warnings from logging import DEBUG, INFO -from typing import TYPE_CHECKING, Literal, TypedDict, TypeGuard, Unpack +from typing import TYPE_CHECKING, Literal, TypeGuard import numpy as np import xarray as xr from scipy.interpolate import RegularGridInterpolator from arpes.debug import setup_logger -from arpes.provenance import update_provenance -from arpes.utilities import normalize_to_spectrum -from arpes.xarray_extensions.accessor.spectrum_type import AngleUnit from .fast_interp import Interpolator -from .grids import ( - determine_momentum_axes_from_measurement_axes, - is_dimension_convertible_to_momentum, -) -from .kx_ky_conversion import ConvertKp, ConvertKxKy -from .kz_conversion import ConvertKpKz if TYPE_CHECKING: - from collections.abc import Callable, Hashable - from numpy.typing import NDArray - from arpes._typing.attrs_property import KspaceCoords - from arpes._typing.base import MOMENTUM, XrTypes - from arpes.utilities.conversion.base import CoordinateConverter - from arpes.utilities.conversion.calibration import DetectorCalibration - -__all__ = ["convert_to_kspace"] - LOGLEVELS = (DEBUG, INFO) LOGLEVEL = LOGLEVELS[1] @@ -115,331 +95,3 @@ def _is_dims_match_coordinate_convert( ("phi", "psi"), ("hv", "phi"), } - - -@update_provenance("Automatically k-space converted") -def convert_to_kspace( # noqa: PLR0913 - arr: xr.DataArray, - *, - bounds: dict[MOMENTUM, tuple[float, float]] | None = None, - resolution: dict[MOMENTUM, float] | None = None, - calibration: DetectorCalibration | None = None, - coords: KspaceCoords | None = None, - allow_chunks: bool = False, - **kwargs: Unpack[KspaceCoords], -) -> xr.DataArray: - """Converts volumetric the data to momentum space ("backwards"). Typically what you want. - - Works in general by regridding the data into the new coordinate space and then - interpolating back into the original data. - - For forward conversion, see sibling methods. Forward conversion works by - totally unchanged by the conversion (if we do not apply a Jacobian correction), but the - converting the coordinates, rather than by interpolating the data. As a result, the data will be - coordinates will no longer have equal spacing. 
- - This is only really useful for zero and one dimensional data because for two dimensional data, - the coordinates must become two dimensional in order to fully specify every data point - (this is true in generality, in 3D the coordinates must become 3D as well). - - The only exception to this is if the extra axes do not need to be k-space converted. As is the - case where one of the dimensions is `cycle` or `delay`, for instance. - - You can request a particular resolution for the new data with the `resolution=` parameter, - or a specific set of bounds with the `bounds=` - - Examples: - Convert a 2D cut with automatically inferred range and resolution. - - >>> convert_to_kspace(arpes.io.load_example_data()) # doctest: +SKIP - - xr.DataArray(...) - - Convert a 3D map with a specified momentum window - - >>> convert_to_kspace( # doctest: +SKIP - fermi_surface_map, - kx=np.linspace(-1, 1, 200), - ky=np.linspace(-1, 1, 350), - ) - xr.DataArray(...) - - Args: - arr (xr.DataArray): ARPES data - bounds (dict[MOMENTUM, tuple[float, float]], optional): - The key is the axis name. The value is the bounds. Defaults to {}. - If not set this arg, set coords. - resolution (dict[Momentum, float], optional): dict for the energy/angular resolution. - calibration (DetectorCalibration, optional): DetectorCalibration object. Defaults to None. - coords (KspaceCoords, optional): Coordinate of k-space. Defaults to {}. - allow_chunks (bool): [description]. Defaults to False. - **kwargs: treated as coords. - - Raises: - NotImplementedError: [description] - AnalysisError: [description] - ValueError: [description] - - Returns: - xr.DataArray: Converted ARPES (k-space) data. - """ - coords = {} if coords is None else coords - assert coords is not None - coords.update(kwargs) - - bounds = bounds or {} - arr = arr if isinstance(arr, xr.DataArray) else normalize_to_spectrum(arr) - assert isinstance(arr, xr.DataArray) - if arr.S.angle_unit is AngleUnit.DEG: - arr = arr.S.switched_angle_unit() - logger.debug(f"bounds (covnert_to_kspace): {bounds}") - logger.debug(f"keys in coords (convert_to_kspace): {coords.keys()}") - # Chunking logic - if allow_chunks and ("eV" in arr.dims) and len(arr.eV) > 50: # noqa: PLR2004 - return _chunk_convert( - arr=arr, - bounds=bounds, - resolution=resolution, - calibration=calibration, - coords=coords, - **kwargs, - ) - momentum_incompatibles: list[str] = [ - str(d) - for d in arr.dims - if not is_dimension_convertible_to_momentum(str(d)) and str(d) != "eV" - ] - momentum_compatibles: list[str] = sorted( # Literal["phi", "theta", "beta", "chi", "psi", "hv"] - [str(d) for d in arr.dims if is_dimension_convertible_to_momentum(str(d))], - ) - - if not momentum_compatibles: - return arr # no need to convert, might be XPS or similar - - converted_dims: list[str] = ( - (["eV"] if ("eV" in arr.dims) else []) - + determine_momentum_axes_from_measurement_axes( - momentum_compatibles, - ) # axis_names: list[Literal["phi", "beta", "psi", "theta", "hv"]], - + momentum_incompatibles - ) - - tupled_momentum_compatibles = tuple(momentum_compatibles) - convert_cls: type[ConvertKp | ConvertKxKy | ConvertKpKz] | None = None - if _is_dims_match_coordinate_convert(tupled_momentum_compatibles): - convert_cls = { - ("phi",): ConvertKp, - ("beta", "phi"): ConvertKxKy, - ("phi", "theta"): ConvertKxKy, - ("phi", "psi"): ConvertKxKy, - # ('chi', 'phi',): ConvertKxKy, - ("hv", "phi"): ConvertKpKz, - }.get(tupled_momentum_compatibles) - assert convert_cls is not None, "Cannot select convert class" - - converter: 
CoordinateConverter = convert_cls( - arr=arr, - dim_order=converted_dims, - calibration=calibration, - ) - - converted_coordinates: dict[Hashable, NDArray[np.float64]] = converter.get_coordinates( - resolution=resolution, - bounds=bounds, - ) - if not set(coords.keys()).issubset(converted_coordinates.keys()): - extra = set(coords.keys()).difference(converted_coordinates.keys()) - msg = f"Unexpected passed coordinates: {extra}" - raise ValueError(msg) - converted_coordinates.update(**coords) # type: ignore[misc] - result = convert_coordinates( - arr, - target_coordinates=converted_coordinates, - coordinate_transform={ - "dims": converted_dims, - "transforms": {str(dim): converter.conversion_for(dim) for dim in arr.dims}, - }, - ) - assert isinstance(result, xr.DataArray) - return result - - -class CoordinateTransform(TypedDict, total=True): - dims: list[str] | list[Hashable] # in most case dims should be Literal["kp", "kx", "ky", "kz"]] - transforms: dict[str, Callable[..., NDArray[np.float64]]] - - -def convert_coordinates( - arr: xr.DataArray, - target_coordinates: dict[Hashable, NDArray[np.float64]], - coordinate_transform: CoordinateTransform, - *, - as_dataset: bool = False, -) -> XrTypes: - """Return the band structure data (converted to k-space). - - Args: - arr(xr.DataArray): ARPES data - target_coordinates:(dict[Hashable, NDArray[np.float64]]): coorrdinate for ... - coordinate_transform(dict[str, list[str] | Callable]): coordinat for ... - as_dataset(bool): if True, return the data as the dataSet - - Returns: - XrTypes - """ - assert isinstance(arr, xr.DataArray) - ordered_source_dimensions = arr.dims - - grid_interpolator = grid_interpolator_from_dataarray( - arr.transpose(*ordered_source_dimensions), # TODO(RA): No need? -- perhaps no. - fill_value=np.nan, - ) - - # Skip the Jacobian correction for now - # Convert the raw coordinate axes to a set of gridded points - logger.debug( - f"meshgrid: {[len(target_coordinates[dim]) for dim in coordinate_transform['dims']]}", - ) - meshed_coordinates = [ - meshed_coord.ravel() - for meshed_coord in np.meshgrid( - *[target_coordinates[dim] for dim in coordinate_transform["dims"]], - indexing="ij", - ) - ] - - if "eV" not in arr.dims: - with contextlib.suppress(ValueError): - meshed_coordinates = [arr.S.lookup_offset_coord("eV"), *meshed_coordinates] - old_coord_names = [str(dim) for dim in arr.dims if dim not in target_coordinates] - assert isinstance(coordinate_transform["transforms"], dict) - transforms: dict[str, Callable[..., NDArray[np.float64]]] = coordinate_transform["transforms"] - logger.debug(f"transforms is {transforms}") - old_coordinate_transforms = [ - transforms[str(dim)] for dim in arr.dims if dim not in target_coordinates - ] - logger.debug(f"old_coordinate_transforms: {old_coordinate_transforms}") - - output_shape = [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]] - - def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray[np.float64]: - logger.debug(f"transform function is {transform}") - return np.reshape( - transform(*meshed_coordinates), - output_shape, - order="C", - ) - - old_dimensions = [compute_coordinate(tr) for tr in old_coordinate_transforms] - - ordered_transformations = [transforms[str(dim)] for dim in arr.dims] - transformed_coordinates = [tr(*meshed_coordinates) for tr in ordered_transformations] - - converted_volume = ( - grid_interpolator(np.array(transformed_coordinates).T) - if not isinstance(grid_interpolator, Interpolator) - else 
grid_interpolator(transformed_coordinates) - ) - - # Wrap it all up - def acceptable_coordinate(c: NDArray[np.float64] | xr.DataArray) -> bool: - """Return True if the dim of array is subset of dim of coordinate_transform. - - Currently we do this to filter out coordinates - that are functions of the old angular dimensions, - we could forward convert these, but right now we do not - - Args: - c (xr.DataArray): DataArray for check. - - Returns: bool - Return True if the dim of array is subset of dim of coordinate_transform. - """ - if isinstance(c, xr.DataArray): - return set(c.dims).issubset(coordinate_transform["dims"]) - return True - - target_coordinates = {k: v for k, v in target_coordinates.items() if acceptable_coordinate(v)} - data = xr.DataArray( - np.reshape( - converted_volume, - [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]], - order="C", - ), - target_coordinates, - coordinate_transform["dims"], - attrs=arr.attrs, - ) - if as_dataset: - old_mapped_coords = [ - xr.DataArray( - values, - coords=target_coordinates, - dims=coordinate_transform["dims"], - attrs=arr.attrs, - ) - for values in old_dimensions - ] - variables = {"data": data} - variables.update( - dict( - zip( - old_coord_names, - old_mapped_coords, - strict=True, - ), - ), - ) - return xr.Dataset(variables, attrs=arr.attrs) - - return data - - -def _chunk_convert( - arr: xr.DataArray, - bounds: dict[MOMENTUM, tuple[float, float]] | None = None, - resolution: dict[MOMENTUM, float] | None = None, - calibration: DetectorCalibration | None = None, - coords: KspaceCoords | None = None, - **kwargs: Unpack[KspaceCoords], -) -> xr.DataArray: - DESIRED_CHUNK_SIZE = 1000 * 1000 * 20 - TOO_LARGE_CHUNK_SIZE = 100 - n_chunks: np.int_ = np.prod(arr.shape) // DESIRED_CHUNK_SIZE - if n_chunks == 0: - warnings.warn( - "Data size is sufficiently small, set allow_chunks=False", - stacklevel=2, - ) - n_chunks += 1 - - if n_chunks > TOO_LARGE_CHUNK_SIZE: - warnings.warn( - "Input array is very large. 
Please consider resampling.", - stacklevel=2, - ) - chunk_thickness = np.max(len(arr.eV) // n_chunks, 1) - logger.debug(f"Chunking along energy: {n_chunks}, thickness {chunk_thickness}") - finished = [] - low_idx = 0 - high_idx = chunk_thickness - while low_idx < len(arr.eV): - chunk = arr.isel(eV=slice(low_idx, high_idx)) - if len(chunk.eV) == 1: - chunk = chunk.squeeze(dim="eV") - kchunk = convert_to_kspace( - arr=chunk, - bounds=bounds, - resolution=resolution, - calibration=calibration, - coords=coords, - allow_chunks=False, - **kwargs, - ) - if "eV" not in kchunk.dims: - kchunk = kchunk.expand_dims("eV") - assert isinstance(kchunk, xr.DataArray) - finished.append(kchunk) - low_idx = high_idx - high_idx = min(len(arr.eV), high_idx + chunk_thickness) - return xr.concat(finished, dim="eV") diff --git a/tests/conftest.py b/tests/conftest.py index c4e6296d..9267101b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,8 +9,7 @@ import pytest from lmfit.models import ConstantModel, LinearModel, LorentzianModel, QuadraticModel -import arpes -import arpes.endstations +# import arpes.endstations from arpes.configuration.manager import config_manager from arpes.fits import AffineBroadenedFD from arpes.io import example_data @@ -193,6 +192,9 @@ class Sandbox: @pytest.fixture def sandbox_configuration() -> Iterator[Sandbox]: """Generates a sandboxed configuration of the ARPES data analysis suite.""" + from arpes import plugin_loader # noqa: PLC0415 + from arpes.endstations import registry # noqa: PLC0415 + resources_dir = Path.cwd() / "tests" / "resources" def set_workspace(name: str) -> None: @@ -217,8 +219,8 @@ def load(path: str) -> xr.DataArray | xr.Dataset: with_workspace=set_workspace, load=load, ) - arpes.plugin_loader.load_plugins() + plugin_loader.load_plugins() yield sandbox config_manager.config["WORKSPACE"] = None # arpes.config.CONFIG["WORKSPACE"] = None - arpes.endstations.registry._ENDSTATION_ALIASES = {} + registry._ENDSTATION_ALIASES = {} diff --git a/tests/test_basic_data_loading.py b/tests/test_basic_data_loading.py index 8273f617..c6bcdcf5 100644 --- a/tests/test_basic_data_loading.py +++ b/tests/test_basic_data_loading.py @@ -416,22 +416,24 @@ class TestMetadata: "spectrum_type": "cut", "experimenter": None, "sample": None, - }, + }, "experiment_info": { - "temperature": np.nan, - "temperature_cryotip": np.nan, - "pressure": np.nan, - "polarization": (np.nan, np.nan), - "photon_flux": np.nan, - "photocurrent": np.nan, - "probe": None, - "probe_detail": None, - "analyzer_detail": {"analyzer_name": "Specs PHOIBOS 225", - "parallel_deflectors": True, - "perpendicular_deflectors": True, - "analyzer_type": "hemispherical", - "analyzer_radius": 225}, - }, + "temperature": np.nan, + "temperature_cryotip": np.nan, + "pressure": np.nan, + "polarization": (np.nan, np.nan), + "photon_flux": np.nan, + "photocurrent": np.nan, + "probe": None, + "probe_detail": None, + "analyzer_detail": { + "analyzer_name": "Specs PHOIBOS 225", + "parallel_deflectors": True, + "perpendicular_deflectors": True, + "analyzer_type": "hemispherical", + "analyzer_radius": 225, + }, + }, "analyzer_info": { "lens_mode": "WideAngleMode:400V", "lens_mode_name": None, @@ -446,14 +448,16 @@ class TestMetadata: "work_function": 4.5, }, "beamline_info": { - "hv": pytest.approx(60.0), + "hv": pytest.approx(60.0), "linewidth": np.nan, "photon_polarization": (np.nan, np.nan), - "undulator_info": {"gap": None, - "z": None, - "harmonic": None, - "polarization": None, - "type": None}, + "undulator_info": { + "gap": 
None, + "z": None, + "harmonic": None, + "polarization": None, + "type": None, + }, "repetition_rate": np.nan, "beam_current": np.nan, "entrance_slit": None, @@ -468,10 +472,12 @@ class TestMetadata: "prebinning": {}, "trapezoidal_correction_strategy": None, "dither_settings": None, - "sweep_settings": {"high_energy": None, - "low_energy": None, - "n_sweeps": None, - "step": None}, + "sweep_settings": { + "high_energy": None, + "low_energy": None, + "n_sweeps": None, + "step": None, + }, "frames_per_slice": np.nan, "frame_duration": np.nan, }, @@ -897,7 +903,7 @@ class TestBasicDataLoading: }, }, ), - # Solaris, Phelix beamline + # Solaris, Phelix beamline ( "phelix_load_cut", { diff --git a/tests/test_conversion_core.py b/tests/test_conversion_core.py index 58f9ca7e..c7f2d125 100644 --- a/tests/test_conversion_core.py +++ b/tests/test_conversion_core.py @@ -3,10 +3,8 @@ import xarray as xr from scipy.interpolate import RegularGridInterpolator -from arpes.utilities.conversion.core import ( - convert_to_kspace, - grid_interpolator_from_dataarray, -) +from arpes.utilities.conversion.api import convert_to_kspace +from arpes.utilities.conversion.core import grid_interpolator_from_dataarray # FILE: tests/test_core.py diff --git a/tests/test_correction_intensity_map.py b/tests/test_correction_intensity_map.py index 474dfd61..ee875be9 100644 --- a/tests/test_correction_intensity_map.py +++ b/tests/test_correction_intensity_map.py @@ -6,7 +6,7 @@ @pytest.fixture -def sample_data(): +def sample_data() -> xr.DataArray: x = np.linspace(0, 10, 11) y = np.linspace(0, 5, 6) z = np.random.rand(len(y), len(x)) @@ -14,7 +14,7 @@ def sample_data(): @pytest.fixture -def sample_data3D(): +def sample_data3d() -> xr.DataArray: x = np.linspace(0, 10, 11) y = np.linspace(0, 5, 6) w = np.linspace(0, 1, 6) # Adding a third dimension for testing @@ -22,9 +22,11 @@ def sample_data3D(): return xr.DataArray(z, coords={"y": y, "x": x, "w": w}, dims=["y", "x", "w"]) -def test_shift_with_xrdataarray(sample_data): +def test_shift_with_xrdataarray(sample_data: xr.DataArray): shift_vals = xr.DataArray( - np.ones(sample_data.sizes["y"]), coords={"y": sample_data.coords["y"]}, dims=["y"], + np.ones(sample_data.sizes["y"]), + coords={"y": sample_data.coords["y"]}, + dims=["y"], ) out = shift(sample_data, shift_vals, shift_axis="x", shift_coords=False) assert isinstance(out, xr.DataArray) @@ -32,28 +34,30 @@ def test_shift_with_xrdataarray(sample_data): np.testing.assert_array_equal(out.coords["y"], sample_data.coords["y"]) -def test_shift_with_xrdataarray_shift_coords(sample_data): +def test_shift_with_xrdataarray_shift_coords(sample_data: xr.DataArray): shift_vals = xr.DataArray( - np.ones(sample_data.sizes["y"]), coords={"y": sample_data.coords["y"]}, dims=["y"], + np.ones(sample_data.sizes["y"]), + coords={"y": sample_data.coords["y"]}, + dims=["y"], ) out = shift(sample_data, shift_vals, shift_axis="x", shift_coords=True) assert not np.allclose(out.coords["x"], sample_data.coords["x"]) -def test_shift_with_ndarray(sample_data): +def test_shift_with_ndarray(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y") assert out.shape == sample_data.shape -def test_shift_with_ndarray_missing_by_axis(sample_data): +def test_shift_with_ndarray_missing_by_axis(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) # This should succeed because the function infers by_axis when 2D out = shift(sample_data, shift_vals, shift_axis="x") 
assert out.shape == sample_data.shape -def test_shift_coords_alignment(sample_data): +def test_shift_coords_alignment(sample_data: xr.DataArray): shift_vals = np.linspace(-1, 1, sample_data.sizes["y"]) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", shift_coords=True) mean_shift = np.mean(shift_vals) @@ -61,28 +65,28 @@ def test_shift_coords_alignment(sample_data): np.testing.assert_allclose(out.coords["x"], expected_coords, atol=1e-6) -def test_shift_extend_coords_min(sample_data): +def test_shift_extend_coords_min(sample_data: xr.DataArray): shift_vals = np.full(sample_data.sizes["y"], 5.0) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", extend_coords=True) assert out.sizes["x"] > sample_data.sizes["x"] -def test_shift_extend_coords_max(sample_data): +def test_shift_extend_coords_max(sample_data: xr.DataArray): shift_vals = np.full(sample_data.sizes["y"], -5.0) out = shift(sample_data, shift_vals, shift_axis="x", by_axis="y", extend_coords=True) assert out.sizes["x"] > sample_data.sizes["x"] -def test_shift_axis_required(sample_data): +def test_shift_axis_required(sample_data: xr.DataArray): shift_vals = np.ones(sample_data.sizes["y"]) with pytest.raises(AssertionError): shift(sample_data, shift_vals, shift_axis="") -def test_shift_by_axis_required_for_ndarray(sample_data3D): - shift_vals = np.ones(sample_data3D.sizes["x"]) # Not matching y +def test_shift_by_axis_required_for_ndarray(sample_data3d: xr.DataArray): + shift_vals = np.ones(sample_data3d.sizes["x"]) # Not matching y with pytest.raises(TypeError): - shift(sample_data3D, shift_vals, shift_axis="y") + shift(sample_data3d, shift_vals, shift_axis="y") def test_shift_with_integer_array(): From ec57fc0dbb659215d11db6699eedeacd724448a3 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Tue, 30 Dec 2025 14:28:25 +0900 Subject: [PATCH 14/16] =?UTF-8?q?=F0=9F=8E=A8=20=20np.float64=20->=20np.fl?= =?UTF-8?q?oating?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/_typing/attrs_property.py | 28 +++--- src/arpes/_typing/base.py | 2 +- src/arpes/_typing/plotting.py | 12 +-- src/arpes/_typing/utils.py | 4 +- src/arpes/analysis/deconvolution.py | 8 +- src/arpes/analysis/derivative.py | 6 +- src/arpes/analysis/filters.py | 2 +- src/arpes/analysis/forward_conversion.py | 26 +++--- src/arpes/analysis/general.py | 2 +- src/arpes/analysis/pocket.py | 2 +- src/arpes/analysis/self_energy.py | 4 +- src/arpes/analysis/spectrum_edges.py | 32 +++---- src/arpes/correction/angle_unit.py | 2 +- src/arpes/correction/coords.py | 6 +- src/arpes/correction/intensity_map.py | 22 ++--- src/arpes/correction/trapezoid.py | 63 +++++++------ src/arpes/deep_learning/io.py | 4 +- src/arpes/endstations/fits_utils.py | 4 +- src/arpes/endstations/plugin/SSRF_NSRL.py | 2 +- src/arpes/endstations/prodigy_itx.py | 8 +- src/arpes/endstations/prodigy_sp2.py | 2 +- src/arpes/example_data/mock.py | 4 +- src/arpes/fits/fit_models/bands.py | 8 +- src/arpes/fits/fit_models/decay.py | 16 ++-- src/arpes/fits/fit_models/dirac.py | 8 +- src/arpes/fits/fit_models/fermi_edge.py | 28 +++--- src/arpes/fits/fit_models/functional_forms.py | 28 +++--- src/arpes/fits/fit_models/misc.py | 16 ++-- src/arpes/fits/fit_models/two_dimensional.py | 12 +-- src/arpes/models/band.py | 14 +-- src/arpes/plotting/coordinates.py | 2 +- src/arpes/plotting/dispersion.py | 8 +- src/arpes/plotting/false_color.py | 2 +- src/arpes/plotting/fermi_surface.py | 2 +- src/arpes/plotting/movie.py | 12 +-- 
src/arpes/plotting/parameter.py | 2 +- src/arpes/plotting/spin.py | 2 +- src/arpes/plotting/stack_plot.py | 6 +- src/arpes/plotting/utils.py | 14 +-- src/arpes/preparation/axis.py | 6 +- src/arpes/preparation/tof.py | 8 +- src/arpes/provenance.py | 2 +- src/arpes/simulation.py | 38 ++++---- src/arpes/utilities/bz.py | 14 +-- src/arpes/utilities/conversion/api.py | 2 +- src/arpes/utilities/conversion/base.py | 14 +-- .../conversion/bounds_calculations.py | 46 +++++----- src/arpes/utilities/conversion/calibration.py | 6 +- src/arpes/utilities/conversion/coordinates.py | 14 +-- src/arpes/utilities/conversion/core.py | 4 +- src/arpes/utilities/conversion/fast_interp.py | 33 +++---- .../utilities/conversion/kx_ky_conversion.py | 88 ++++++++++--------- .../utilities/conversion/kz_conversion.py | 44 +++++----- src/arpes/utilities/funcutils.py | 2 +- src/arpes/utilities/selections.py | 4 +- .../xarray_extensions/_helper/general.py | 6 +- .../xarray_extensions/accessor/general.py | 50 +++++------ .../xarray_extensions/accessor/property.py | 2 +- tests/test_analysis_background.py | 4 +- 59 files changed, 413 insertions(+), 399 deletions(-) diff --git a/src/arpes/_typing/attrs_property.py b/src/arpes/_typing/attrs_property.py index 8bf01456..c5ac8ed1 100644 --- a/src/arpes/_typing/attrs_property.py +++ b/src/arpes/_typing/attrs_property.py @@ -172,15 +172,15 @@ class DAQInfo(TypedDict, total=False): class Coordinates(TypedDict, total=False): """TypedDict for attrs.""" - x: NDArray[np.float64] | float - y: NDArray[np.float64] | float - z: NDArray[np.float64] | float - alpha: NDArray[np.float64] | float - beta: NDArray[np.float64] | float - chi: NDArray[np.float64] | float - theta: NDArray[np.float64] | float - psi: NDArray[np.float64] | float - phi: NDArray[np.float64] | float + x: NDArray[np.floating] | float + y: NDArray[np.floating] | float + z: NDArray[np.floating] | float + alpha: NDArray[np.floating] | float + beta: NDArray[np.floating] | float + chi: NDArray[np.floating] | float + theta: NDArray[np.floating] | float + psi: NDArray[np.floating] | float + phi: NDArray[np.floating] | float class Spectrometer(AnalyzerInfo, Coordinates, DAQInfo, total=False): @@ -215,11 +215,11 @@ class ARPESAttrs(Spectrometer, LightSourceInfo, SampleInfo, total=False): class KspaceCoords(TypedDict, total=False): - eV: NDArray[np.float64] - kp: NDArray[np.float64] - kx: NDArray[np.float64] - ky: NDArray[np.float64] - kz: NDArray[np.float64] + eV: NDArray[np.floating] + kp: NDArray[np.floating] + kx: NDArray[np.floating] + ky: NDArray[np.floating] + kz: NDArray[np.floating] CoordsOffset: TypeAlias = Literal[ diff --git a/src/arpes/_typing/base.py b/src/arpes/_typing/base.py index 66a8922b..31c470c5 100644 --- a/src/arpes/_typing/base.py +++ b/src/arpes/_typing/base.py @@ -41,4 +41,4 @@ AnalysisRegion = Literal["copper_prior", "wide_angular", "narrow_angular"] -SelType = float | str | slice | list[float | str] | NDArray[np.float64] +SelType = float | str | slice | list[float | str] | NDArray[np.floating] diff --git a/src/arpes/_typing/plotting.py b/src/arpes/_typing/plotting.py index ad6043de..fb43b547 100644 --- a/src/arpes/_typing/plotting.py +++ b/src/arpes/_typing/plotting.py @@ -61,7 +61,7 @@ class Line2DProperty(TypedDict, total=False): - agg_filter: Callable[[NDArray[np.float64], int], tuple[NDArray[np.float64], int, int]] + agg_filter: Callable[[NDArray[np.floating], int], tuple[NDArray[np.floating], int, int]] alpha: float | None animated: bool antialiased: bool | list[bool] @@ -119,7 +119,7 @@ class 
PolyCollectionProperty(Line2DProperty, total=False):
     norm: Normalize | None
     offset_transform: Transform
     # offsets: (N, 2) or (2, ) array-like
-    sizes: NDArray[np.float64] | None
+    sizes: NDArray[np.floating] | None
     transform: Transform
     urls: list[str] | None
 
@@ -127,7 +127,7 @@ class PolyCollectionProperty(Line2DProperty, total=False):
 class MPLPlotKwargsBasic(TypedDict, total=False):
     """Kwargs for Axes.plot & Axes.fill_between."""
 
-    agg_filter: Callable[[NDArray[np.float64], int], tuple[NDArray[np.float64], int, int]]
+    agg_filter: Callable[[NDArray[np.floating], int], tuple[NDArray[np.floating], int, int]]
     alpha: float | None
     animated: bool
     antialiased: bool | list[bool]
@@ -183,8 +183,8 @@ class MPLPlotKwargs(MPLPlotKwargsBasic, total=False):
     randomness: float
     solid_capstyle: CapStyleType
     solid_joinstyle: JoinStyleType
-    xdata: NDArray[np.float64]
-    ydata: NDArray[np.float64]
+    xdata: NDArray[np.floating]
+    ydata: NDArray[np.floating]
     zorder: float
 
 
@@ -247,7 +247,7 @@ class ColorbarParam(TypedDict, total=False):
 
 
 class MPLTextParam(TypedDict, total=False):
-    agg_filter: Callable[[NDArray[np.float64], int], tuple[NDArray[np.float64], int, int]]
+    agg_filter: Callable[[NDArray[np.floating], int], tuple[NDArray[np.floating], int, int]]
     alpha: float | None
     animated: bool
     antialiased: bool
diff --git a/src/arpes/_typing/utils.py b/src/arpes/_typing/utils.py
index 00f66245..d14ebf21 100644
--- a/src/arpes/_typing/utils.py
+++ b/src/arpes/_typing/utils.py
@@ -48,12 +48,12 @@ def flatten_literals(literal_type: Incomplete) -> set[str]:
 
 
 def is_dict_kspacecoords(
-    a_dict: dict[Hashable, NDArray[np.float64]] | dict[str, NDArray[np.float64]],
+    a_dict: dict[Hashable, NDArray[np.floating]] | dict[str, NDArray[np.floating]],
 ) -> TypeGuard[KspaceCoords]:
     """Checks if a dictionary contains k-space coordinates.
 
     Args:
-        a_dict (dict[Hashable, NDArray[np.float64]] | dict[str, NDArray[np.float64]]):
+        a_dict (dict[Hashable, NDArray[np.floating]] | dict[str, NDArray[np.floating]]):
             The dictionary to check.
 
     Returns:
diff --git a/src/arpes/analysis/deconvolution.py b/src/arpes/analysis/deconvolution.py
index b5f6b3fc..65400024 100644
--- a/src/arpes/analysis/deconvolution.py
+++ b/src/arpes/analysis/deconvolution.py
@@ -38,7 +38,7 @@
 @update_provenance("Approximate Iterative Deconvolution")
 def deconvolve_ice(
     data: xr.DataArray,
-    psf: NDArray[np.float64],
+    psf: NDArray[np.floating],
     n_iterations: int = 5,
     deg: int | None = None,
 ) -> xr.DataArray:
@@ -57,7 +57,7 @@ def deconvolve_ice(
         The deconvolved data in the same format.
""" data = data if isinstance(data, xr.DataArray) else normalize_to_spectrum(data) - arr: NDArray[np.float64] = data.values + arr: NDArray[np.floating] = data.values if deg is None: deg = n_iterations - 3 iteration_steps = list(range(1, n_iterations + 1)) @@ -165,12 +165,12 @@ def make_psf( if fwhm: sigmas = {k: v / (2 * np.sqrt(2 * np.log(2))) for k, v in sigmas.items()} - cov: NDArray[np.float64] = np.zeros((len(sigmas), len(sigmas)), dtype=np.float64) + cov: NDArray[np.floating] = np.zeros((len(sigmas), len(sigmas)), dtype=np.float64) for i, dim in enumerate(data.dims): cov[i][i] = sigmas[dim] ** 2 # sigma is deviation, but multivariate_normal uses covariant logger.debug(f"cov: {cov}") - psf_coords: dict[Hashable, NDArray[np.float64]] = {} + psf_coords: dict[Hashable, NDArray[np.floating]] = {} for k in data.dims: psf_coords[str(k)] = np.linspace( -(pixels[str(k)] - 1) / 2 * strides[str(k)], diff --git a/src/arpes/analysis/derivative.py b/src/arpes/analysis/derivative.py index 540668e7..2e9f36aa 100644 --- a/src/arpes/analysis/derivative.py +++ b/src/arpes/analysis/derivative.py @@ -45,10 +45,10 @@ def _nothing_to_array(x: xr.DataArray) -> xr.DataArray: def _vector_diff( - arr: NDArray[np.float64], + arr: NDArray[np.floating], delta: tuple[DELTA, DELTA], n: int = 1, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Computes finite differences along the vector delta, given as a tuple. Using delta = (0, 1) is equivalent to np.diff(..., axis=1), while @@ -138,7 +138,7 @@ def _gradient_modulus( """ spectrum = data if isinstance(data, xr.DataArray) else normalize_to_spectrum(data) assert isinstance(spectrum, xr.DataArray) - values: NDArray[np.float64] = spectrum.values + values: NDArray[np.floating] = spectrum.values gradient_vector = np.zeros(shape=(8, *values.shape)) gradient_vector[0, :-delta, :] = _vector_diff(values, (delta, 0)) diff --git a/src/arpes/analysis/filters.py b/src/arpes/analysis/filters.py index b2d93bdd..17517839 100644 --- a/src/arpes/analysis/filters.py +++ b/src/arpes/analysis/filters.py @@ -117,7 +117,7 @@ def boxcar_filter_arr( if dim not in integered_size or integered_size[str(dim)] == 0: integered_size[str(dim)] = default_size widths_pixel: tuple[int, ...] 
= tuple([integered_size[str(k)] for k in arr.dims]) - array_values: NDArray[np.float64] = np.nan_to_num(arr.values, nan=0.0, copy=True) + array_values: NDArray[np.floating] = np.nan_to_num(arr.values, nan=0.0, copy=True) for _ in range(iteration_n): array_values = ndimage.uniform_filter( diff --git a/src/arpes/analysis/forward_conversion.py b/src/arpes/analysis/forward_conversion.py index 4bb3833e..74daeec5 100644 --- a/src/arpes/analysis/forward_conversion.py +++ b/src/arpes/analysis/forward_conversion.py @@ -53,7 +53,7 @@ LOGLEVEL = LOGLEVELS[1] logger = setup_logger(__name__, LOGLEVEL) -A = TypeVar("A", NDArray[np.float64], float) +A = TypeVar("A", NDArray[np.floating], float) def convert_coordinate_forward( @@ -154,11 +154,11 @@ def convert_through_angular_pair( # noqa: PLR0913 data: xr.DataArray, first_point: dict[Hashable, float], second_point: dict[Hashable, float], - cut_specification: dict[str, NDArray[np.float64]], - transverse_specification: dict[str, NDArray[np.float64]], + cut_specification: dict[str, NDArray[np.floating]], + transverse_specification: dict[str, NDArray[np.floating]], *, relative_coords: bool = True, - **k_coords: NDArray[np.float64], + **k_coords: NDArray[np.floating], ) -> xr.DataArray: """Converts the lower dimensional ARPES cut passing through `first_point` and `second_point`. @@ -263,11 +263,11 @@ def convert_through_angular_pair( # noqa: PLR0913 def convert_through_angular_point( data: xr.DataArray, coords: dict[Hashable, float], - cut_specification: dict[str, NDArray[np.float64]], - transverse_specification: dict[str, NDArray[np.float64]], + cut_specification: dict[str, NDArray[np.floating]], + transverse_specification: dict[str, NDArray[np.floating]], *, relative_coords: bool = True, - **k_coords: NDArray[np.float64], + **k_coords: NDArray[np.floating], ) -> xr.DataArray: """Converts the lower dimensional ARPES cut passing through given angular `coords`. 
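A minimal usage sketch for the relaxed annotations above; `data` and the
coordinate names/ranges are illustrative assumptions, not taken from the
package's own examples. The point of `NDArray[np.floating]` is that float32
cut specifications now type-check alongside float64:

    import numpy as np

    cut_specification = {"ky": np.linspace(-0.3, 0.3, 300, dtype=np.float32)}
    transverse_specification = {"kx": np.linspace(-0.05, 0.05, 21)}  # float64
    # Hypothetical call, assuming `data` is an angle-space xr.DataArray:
    # kcut = convert_through_angular_point(
    #     data, {"phi": 0.13, "beta": 0.0}, cut_specification, transverse_specification
    # )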
@@ -332,13 +332,13 @@ def convert_coordinates( ) -> xr.Dataset: """Converts coordinates forward in momentum.""" - def unwrap_coord(coord: xr.DataArray | float) -> NDArray[np.float64] | float: + def unwrap_coord(coord: xr.DataArray | float) -> NDArray[np.floating] | float: if isinstance(coord, xr.DataArray): return coord.values return coord coord_names: set[str] = {"phi", "psi", "alpha", "theta", "beta", "chi", "hv", "eV"} - raw_coords: dict[str, NDArray[np.float64] | float] = { + raw_coords: dict[str, NDArray[np.floating] | float] = { k: unwrap_coord(arr.S.lookup_offset_coord(k)) for k in coord_names } raw_angles = {k: v for k, v in raw_coords.items() if k not in {"eV", "hv"}} @@ -357,8 +357,8 @@ def unwrap_coord(coord: xr.DataArray | float) -> NDArray[np.float64] | float: def expand_to( cname: str, - c: NDArray[np.float64] | float, - ) -> NDArray[np.float64] | float: + c: NDArray[np.floating] | float, + ) -> NDArray[np.floating] | float: if isinstance(c, float): return c assert isinstance(c, np.ndarray) @@ -460,7 +460,7 @@ def convert_coordinates_to_kspace_forward(arr: XrTypes) -> xr.Dataset: ("chi", "hv", "phi"): ["kx", "ky", "kz"], }.get(tupled_momentum_compatibles, []) full_old_dims: list[str] = [*momentum_compatibles, "eV"] - projection_vectors: NDArray[np.float64] = np.ndarray( + projection_vectors: NDArray[np.floating] = np.ndarray( shape=tuple(len(arr.coords[d]) for d in full_old_dims), dtype=object, ) @@ -549,7 +549,7 @@ def _broadcast_by_dim_location( data: xr.DataArray, target_shape: tuple[int, ...], dim_location: int | None = None, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: if isinstance(data, xr.DataArray) and not data.dims: data = data.item() if isinstance( diff --git a/src/arpes/analysis/general.py b/src/arpes/analysis/general.py index a7372e67..460f8b2b 100644 --- a/src/arpes/analysis/general.py +++ b/src/arpes/analysis/general.py @@ -62,7 +62,7 @@ def fit_fermi_edge( @update_provenance("Normalized by the 1/Fermi Dirac Distribution at sample temp") def normalize_by_fermi_distribution( data: xr.DataArray, - max_gain: float | np.float64 = 0, + max_gain: float | np.floating = 0.0, rigid_shift: float = 0, instrumental_broadening: float = 0, total_broadening: float = 0, diff --git a/src/arpes/analysis/pocket.py b/src/arpes/analysis/pocket.py index 415b5f83..676a783d 100644 --- a/src/arpes/analysis/pocket.py +++ b/src/arpes/analysis/pocket.py @@ -194,7 +194,7 @@ def curves_along_pocket( center_point: dict[Hashable, float] = {k: v for k, v in kwargs.items() if k in data.dims} - center_as_vector: NDArray[np.float64] = np.array( + center_as_vector: NDArray[np.floating] = np.array( [center_point.get(dim_name, 0.0) for dim_name in fermi_surface_dims], ) diff --git a/src/arpes/analysis/self_energy.py b/src/arpes/analysis/self_energy.py index 3649ef76..22359b0c 100644 --- a/src/arpes/analysis/self_energy.py +++ b/src/arpes/analysis/self_energy.py @@ -149,7 +149,7 @@ def estimate_bare_band( def quasiparticle_lifetime( self_energy: xr.DataArray, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates the quasiparticle mean free path in meters (meters!). 
The bare band is used to calculate the band/Fermi velocity @@ -169,7 +169,7 @@ def quasiparticle_lifetime( def quasiparticle_mean_free_path( self_energy: xr.DataArray, bare_band: xr.DataArray, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: lifetime = quasiparticle_lifetime(self_energy) return lifetime * local_fermi_velocity(bare_band) diff --git a/src/arpes/analysis/spectrum_edges.py b/src/arpes/analysis/spectrum_edges.py index ddd4da7f..59170765 100644 --- a/src/arpes/analysis/spectrum_edges.py +++ b/src/arpes/analysis/spectrum_edges.py @@ -33,7 +33,7 @@ def find_spectrum_energy_edges( data: xr.DataArray, *, indices: bool = False, -) -> NDArray[np.float64] | NDArray[np.int_]: +) -> NDArray[np.floating] | NDArray[np.int_]: """Compute the angular edges of the spectrum over the specified energy range. This method identifies the low and high angular edges for each slice of the spectrum @@ -51,7 +51,7 @@ def find_spectrum_energy_edges( resolution for edge detection. Defaults to 0.05. Returns: - tuple[NDArray[np.float64], NDArray[np.float64], xr.DataArray]: + tuple[NDArray[np.floating], NDArray[np.floating], xr.DataArray]: - If `indices=True`: - Low edge indices. - High edge indices. @@ -85,7 +85,7 @@ def find_spectrum_energy_edges( energy_marginal = data.sum([d for d in data.dims if d != "eV"]) embed_size = 20 - embedded: NDArray[np.float64] = np.ndarray(shape=[embed_size, energy_marginal.sizes["eV"]]) + embedded: NDArray[np.floating] = np.ndarray(shape=[embed_size, energy_marginal.sizes["eV"]]) embedded[:] = energy_marginal.values embedded = ndi.gaussian_filter(embedded, embed_size / 3) @@ -108,7 +108,7 @@ def find_spectrum_angular_edges( *, angle_name: str = "phi", indices: bool = False, -) -> NDArray[np.float64] | NDArray[np.int_]: +) -> NDArray[np.floating] | NDArray[np.int_]: """Return angle position corresponding to the (1D) spectrum edge. Args: @@ -116,7 +116,7 @@ def find_spectrum_angular_edges( angle_name (str): Angle name to find the edge indices (bool): If True, return the index not the angle value. - Returns: NDArray[np.float64] | NDArray[np.int_] + Returns: NDArray[np.floating] | NDArray[np.int_] Angle position """ angular_dim: str = "pixel" if "pixel" in data.dims else angle_name @@ -126,7 +126,7 @@ def find_spectrum_angular_edges( ) embed_size = 20 - embedded: NDArray[np.float64] = np.ndarray( + embedded: NDArray[np.floating] = np.ndarray( shape=[embed_size, phi_marginal.sizes[angular_dim]], ) embedded[:] = phi_marginal.values @@ -153,7 +153,7 @@ def find_spectrum_angular_edges_full( *, indices: bool = False, energy_division: float = 0.05, -) -> tuple[NDArray[np.float64], NDArray[np.float64], xr.DataArray]: +) -> tuple[NDArray[np.floating], NDArray[np.floating], xr.DataArray]: """Finds the angular edges of the spectrum based on energy slicing and rebinning. This method uses edge detection techniques to identify boundaries in the angular dimension. @@ -167,9 +167,9 @@ def find_spectrum_angular_edges_full( Returns: tuple: A tuple containing: - - low_edges (NDArray[np.float64]): Values or indices of the low edges + - low_edges (NDArray[np.floating]): Values or indices of the low edges of the spectrum. - - high_edges (NDArray[np.float64]): Values or indices of the high edges + - high_edges (NDArray[np.floating]): Values or indices of the high edges of the spectrum. - eV_coords (xr.DataArray): The coordinates of the rebinned energy axis. 
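A small self-contained check of why these signatures moved to `np.floating`;
the edge values here are invented. Reductions over a float32 edge array yield
float32 scalars, which are instances of np.floating but not of np.float64:

    import numpy as np

    energy_edges = np.array([-0.42, 0.05], dtype=np.float32)
    low_edge = np.min(energy_edges)  # np.float32 scalar
    assert isinstance(low_edge, np.floating)
    assert not isinstance(low_edge, np.float64)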
@@ -181,8 +181,8 @@ def find_spectrum_angular_edges_full( # down to this region # we will then find the appropriate edge for each slice, and do a fit to the edge locations energy_edge = find_spectrum_energy_edges(data) - low_edge: np.float64 = np.min(energy_edge) + energy_division - high_edge: np.float64 = np.max(energy_edge) - energy_division + low_edge: np.floating = np.min(energy_edge) + energy_division + high_edge: np.floating = np.max(energy_edge) - energy_division if high_edge - low_edge < 3 * energy_division: # Doesn't look like the automatic inference of the energy edge was valid @@ -199,7 +199,7 @@ def find_spectrum_angular_edges_full( rebinned = rebin(energy_cut, shape=new_shape) embed_size = 20 - embedded: NDArray[np.float64] = np.empty( + embedded: NDArray[np.floating] = np.empty( shape=[embed_size, rebinned.sizes[angular_dim]], ) low_edges = [] @@ -236,8 +236,8 @@ def zero_spectrometer_edges( data: xr.DataArray, cut_margin: int = 0, interp_range: float | None = None, - low: Sequence[float] | NDArray[np.float64] | None = None, - high: Sequence[float] | NDArray[np.float64] | None = None, + low: Sequence[float] | NDArray[np.floating] | None = None, + high: Sequence[float] | NDArray[np.floating] | None = None, ) -> xr.DataArray: """Zeros out the spectrum data outside of the specified low and high edges. @@ -251,10 +251,10 @@ def zero_spectrometer_edges( Defaults to 50 pixels or 0.08 in angular units, depending on the data type. interp_range (float or None, optional): Specifies the interpolation range for edge data. If provided, the edge values are interpolated within this range. - low (Sequence[float], NDArray[np.float64], or None, optional): Low edge values. + low (Sequence[float], NDArray[np.floating], or None, optional): Low edge values. Use this to manually specify the low edge. Defaults to None. (automatically determined). - high (Sequence[float], NDArray[np.float64], or None, optional): High edge values. + high (Sequence[float], NDArray[np.floating], or None, optional): High edge values. Use this to manually specify the high edge. Defaults to None. (automatically determined). diff --git a/src/arpes/correction/angle_unit.py b/src/arpes/correction/angle_unit.py index d1e12752..a5d9a229 100644 --- a/src/arpes/correction/angle_unit.py +++ b/src/arpes/correction/angle_unit.py @@ -11,7 +11,7 @@ from arpes._typing.utils import flatten_literals from arpes.xarray_extensions.accessor.spectrum_type import AngleUnit -AngleValue = TypeVar("AngleValue", float, np.float64, NDArray[np.float64]) +AngleValue = TypeVar("AngleValue", float, np.floating, NDArray[np.floating]) def radian_to_degree(data: xr.DataArray) -> xr.DataArray: diff --git a/src/arpes/correction/coords.py b/src/arpes/correction/coords.py index e9f67fba..e5e7b8ce 100644 --- a/src/arpes/correction/coords.py +++ b/src/arpes/correction/coords.py @@ -38,7 +38,7 @@ def adjust_coords_to_limit( da: xr.DataArray, new_limits: Mapping[Hashable, float], -) -> dict[Hashable, NDArray[np.float64]]: +) -> dict[Hashable, NDArray[np.floating]]: """Extend the coordinates of an xarray DataArray to given values for each dimension. The extension will ensure that the new coordinates cover up to the given extension value, @@ -80,7 +80,7 @@ def adjust_coords_to_limit( def extend_coords( da: xr.DataArray, - new_coords: Mapping[Hashable, list[float] | NDArray[np.float64]], + new_coords: Mapping[Hashable, list[float] | NDArray[np.floating]], ) -> xr.DataArray: """Expand the coordinates of an xarray DataArray by adding new coordinate values. 
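A hedged sketch of the coordinate-extension API above; the axis names and
values are invented, and the commented call mirrors only the signature
visible in this hunk:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.zeros((3, 4)),
        coords={"y": np.arange(3.0), "x": np.arange(4.0)},
        dims=["y", "x"],
    )
    new_x = np.arange(4.0, 6.0, dtype=np.float32)  # any floating dtype works
    # extended = extend_coords(da, {"x": new_x})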
@@ -118,7 +118,7 @@ def extend_coords(
 
 
 def is_equally_spaced(
-    coords: xr.DataArray | NDArray[np.float64],
+    coords: xr.DataArray | NDArray[np.floating],
     dim_name: Hashable | None = None,
     **kwargs: Incomplete,
 ) -> float:
diff --git a/src/arpes/correction/intensity_map.py b/src/arpes/correction/intensity_map.py
index a47ba891..279d98ba 100644
--- a/src/arpes/correction/intensity_map.py
+++ b/src/arpes/correction/intensity_map.py
@@ -46,7 +46,7 @@ class ShiftParam(TypedDict, total=False):
 
 def shift(  # noqa: PLR0913
     data: xr.DataArray,
-    other: xr.DataArray | NDArray[np.float64],
+    other: xr.DataArray | NDArray[np.floating],
     shift_axis: str = "",
     by_axis: str = "",
     *,
@@ -107,7 +107,7 @@ def shift(  # noqa: PLR0913
         data = coords.extend_coords(da=data, new_coords=extended_coord)
 
     padding_value = 0 if data.dtype == np.int_ else np.nan
-    shifted_data: NDArray[np.float64] = shift_by(
+    shifted_data: NDArray[np.floating] = shift_by(
         arr=data.values,
         value=shift_amount,
         axis=data.dims.index(shift_axis),
@@ -126,12 +126,12 @@ def shift(  # noqa: PLR0913
 
 def _compute_shift_amount(
     data: xr.DataArray,
-    other: xr.DataArray | NDArray[np.float64],
+    other: xr.DataArray | NDArray[np.floating],
     shift_axis: str,
     by_axis: str = "",
     *,
     shift_coords: bool = False,
-) -> tuple[NDArray[np.float64], float, str]:
+) -> tuple[NDArray[np.floating], float, str]:
     """Compute shift amount based on `other` and determine `by_axis` if necessary.
 
     Helper function for `shift`
@@ -146,7 +146,7 @@ def _compute_shift_amount(
         shift.
 
     Returns:
-        tuple[NDArray[np.float64], float, str]:
+        tuple[NDArray[np.floating], float, str]:
            - shift_amount: The computed shift values.
            - mean_shift: The mean value of `other` (0 if not shifting coords).
            - by_axis: The determined `by_axis` name.
@@ -184,23 +184,23 @@ def _compute_shift_amount(
 
 
 def shift_by(
-    arr: NDArray[np.float64],
-    value: NDArray[np.float64],
+    arr: NDArray[np.floating],
+    value: NDArray[np.floating],
     axis: int = 0,
     by_axis: int = 0,
     **kwargs: Unpack[ShiftParam],
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Shifts slices of `arr` perpendicular to `by_axis` by `value`.
 
     Args:
-        arr (NDArray[np.float64): Input array to be shifted.
-        value (NDArray[np.float64): Array of shift values.
+        arr (NDArray[np.floating]): Input array to be shifted.
+        value (NDArray[np.floating]): Array of shift values.
         axis (int): Axis number of np.ndarray for shift.
         by_axis (int): Axis number of np.ndarray for non-shift.
         **kwargs(ShiftParam): Additional parameters to pass to scipy.ndimage.shift.
 
     Returns:
-        NDArray[np.float64]: The shifted array.
+        NDArray[np.floating]: The shifted array.
     """
     assert axis != by_axis, "`axis` and `by_axis` must be different."
arr_copy = arr.copy()
diff --git a/src/arpes/correction/trapezoid.py b/src/arpes/correction/trapezoid.py
index 619c4a05..da06f374 100644
--- a/src/arpes/correction/trapezoid.py
+++ b/src/arpes/correction/trapezoid.py
@@ -44,9 +44,9 @@
 
 @numba.njit(parallel=True)
 def _phi_to_phi(
-    energy: NDArray[np.float64],
-    phi: NDArray[np.float64],
-    phi_out: NDArray[np.float64],
+    energy: NDArray[np.floating],
+    phi: NDArray[np.floating],
+    phi_out: NDArray[np.floating],
     corners: typed.typeddict.Dict[str, typed.typeddict.Dict[str, float]],
     rectangle_phis: list[float],
 ) -> None:
@@ -91,9 +91,9 @@ def _phi_to_phi(
 
 @numba.njit(parallel=True)
 def _phi_to_phi_forward(
-    energy: NDArray[np.float64],
-    phi: NDArray[np.float64],
-    phi_out: NDArray[np.float64],
+    energy: NDArray[np.floating],
+    phi: NDArray[np.floating],
+    phi_out: NDArray[np.floating],
     corners: typed.typeddict.Dict[str, typed.typeddict.Dict[str, float]],
     rectangle_phis: list[float],
 ) -> None:
@@ -104,11 +104,11 @@ def _phi_to_phi_forward(
     a rectangle.
 
     Args:
-    energy : NDArray[np.float64]
+    energy : NDArray[np.floating]
         The energy values of the trapezoid.
-    phi : NDArray[np.float64]
+    phi : NDArray[np.floating]
         The phi values of the trapezoid.
-    phi_out : NDArray[np.float64]
+    phi_out : NDArray[np.floating]
         The output phi values of the rectangle.
     corners : dict[str, dict[str, float]]
         The corners of the trapezoid, each corner is a dictionary with 'eV' and 'phi' keys.
@@ -179,7 +179,7 @@ def get_coordinates(
         self,
         resolution: dict[str, float] | None = None,
         bounds: dict[str, tuple[float, float]] | None = None,
-    ) -> dict[Hashable, NDArray[np.float64]]:
+    ) -> dict[Hashable, NDArray[np.floating]]:
         """Calculates the coordinates which should be used in the corrected data.
 
         Args:
@@ -208,8 +208,8 @@ def get_coordinates(
         logger.debug(f"coordinates: {coordinates}")
         return coordinates
 
-    def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.float64]]:
-        def _with_identity(*args: NDArray[np.float64]) -> NDArray[np.float64]:
+    def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.floating]]:
+        def _with_identity(*args: NDArray[np.floating]) -> NDArray[np.floating]:
             return self.identity_transform(dim, *args)
 
         return {
@@ -221,9 +221,9 @@ def _with_identity(*args: NDArray[np.float64]) -> NDArray[np.float64]:
 
     def phi_to_phi(
         self,
-        binding_energy: NDArray[np.float64],
-        phi: NDArray[np.float64],
-    ) -> NDArray[np.float64]:
+        binding_energy: NDArray[np.floating],
+        phi: NDArray[np.floating],
+    ) -> NDArray[np.floating]:
         """Converts the given phi values to a new phi representation based on binding energy.
 
         This method computes the new phi values based on the provided binding energy and phi values,
 the existing value.
 
         Args:
-            binding_energy (NDArray[np.float64]): The array of binding energy values.
-            phi (NDArray[np.float64]): The array of phi values to be converted.
+            binding_energy (NDArray[np.floating]): The array of binding energy values.
+            phi (NDArray[np.floating]): The array of phi values to be converted.
             rectangle_phis (list[float]): max and min of the angle phi in the rectangle.
 
         Returns:
-            NDArray[np.float64]: The transformed phi values.
+            NDArray[np.floating]: The transformed phi values.
 
         Raises:
             ValueError: If any required attributes are missing or invalid.
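An illustrative corners payload for the trapezoid correction above; every
number here is invented. It only mirrors the structure that these hunks make
visible: a list of {"eV": ..., "phi": ...} dictionaries (consumed by
_corners_typed_dict further down) plus the min/max phi of the target
rectangle:

    corners = [
        {"eV": -0.30, "phi": -0.32},
        {"eV": -0.30, "phi": 0.30},
        {"eV": 0.10, "phi": -0.26},
        {"eV": 0.10, "phi": 0.26},
    ]
    rectangle_phis = [-0.26, 0.26]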
@@ -255,21 +255,21 @@ def phi_to_phi( def phi_to_phi_forward( self, - binding_energy: NDArray[np.float64], - phi: NDArray[np.float64], - ) -> NDArray[np.float64]: + binding_energy: NDArray[np.floating], + phi: NDArray[np.floating], + ) -> NDArray[np.floating]: """Transforms phi values based on binding energy using a forward method. This method computes the new phi values based on the provided binding energy and phi values, applying a forward transformation. The result is stored in the `phi_out` array. Args: - binding_energy (NDArray[np.float64]): The array of binding energy values. - phi (NDArray[np.float64]): The array of phi values to be converted. + binding_energy (NDArray[np.floating]): The array of binding energy values. + phi (NDArray[np.floating]): The array of phi values to be converted. rectangle_phis (list[float]): max and min of the angle phi in the rectangle. Returns: - NDArray[np.float64]: The transformed phi values after the forward transformation. + NDArray[np.floating]: The transformed phi values after the forward transformation. """ phi_out = np.zeros_like(phi) logger.debug(f"type of self.corners in phi_to_phi_forward : {type(self.corners)}") @@ -431,10 +431,19 @@ def _corners_typed_dict( corners: list[dict[str, float]], ) -> typed.typeddict.Dict[str, typed.typeddict.Dict[str, float]]: normal_dict_corners = _corners(corners) - inter_dict_type = types.DictType(keyty=types.unicode_type, valty=types.float64) - typed_dict_corners = typed.Dict.empty(key_type=types.unicode_type, value_type=inter_dict_type) + inter_dict_type = types.DictType( + keyty=types.unicode_type, + valty=types.float64, + ) + typed_dict_corners = typed.Dict.empty( + key_type=types.unicode_type, + value_type=inter_dict_type, + ) for corner_position, coords in normal_dict_corners.items(): - each_corner = typed.Dict.empty(key_type=types.unicode_type, value_type=types.float64) + each_corner = typed.Dict.empty( + key_type=types.unicode_type, + value_type=types.float64, + ) for coord_name in ["eV", "phi"]: each_corner[coord_name] = coords[coord_name] typed_dict_corners[corner_position] = each_corner diff --git a/src/arpes/deep_learning/io.py b/src/arpes/deep_learning/io.py index fe0e4ac6..76f3ee45 100644 --- a/src/arpes/deep_learning/io.py +++ b/src/arpes/deep_learning/io.py @@ -24,7 +24,7 @@ } -def from_portable_bin(path: Path) -> NDArray[np.float64]: +def from_portable_bin(path: Path) -> NDArray[np.floating]: """Reads data from a relatively portable binary format. A "portable" binary file is a directory containing @@ -45,7 +45,7 @@ def from_portable_bin(path: Path) -> NDArray[np.float64]: return arr.reshape(shape) -def to_portable_bin(arr: NDArray[np.float64], path: Path) -> None: +def to_portable_bin(arr: NDArray[np.floating], path: Path) -> None: """Converts data to a relatively portable binary format. See also `read_portable_bin`. 
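A hedged round-trip sketch for the portable binary helpers above; the
directory name is arbitrary and the calls are commented out because the
on-disk layout is an implementation detail of the module:

    from pathlib import Path

    import numpy as np

    arr = np.random.default_rng(0).random((16, 8), dtype=np.float32)
    # to_portable_bin(arr, Path("example_portable"))
    # restored = from_portable_bin(Path("example_portable"))
    # np.testing.assert_array_equal(restored, arr)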
diff --git a/src/arpes/endstations/fits_utils.py b/src/arpes/endstations/fits_utils.py
index f659f7a6..b331ae25 100644
--- a/src/arpes/endstations/fits_utils.py
+++ b/src/arpes/endstations/fits_utils.py
@@ -41,7 +41,7 @@
     "Z": "z",
 }
 
-CoordsDict: TypeAlias = dict[str, NDArray[np.float64]]
+CoordsDict: TypeAlias = dict[str, NDArray[np.floating]]
 Dimension = str
 
 
@@ -140,7 +140,7 @@ def extract_coords(
 
         logger.debug(f"Loop (name, n_regions, size) = {(name, n_regions, n)}")
 
-        coord: NDArray[np.float64] = np.array(())
+        coord: NDArray[np.float64] = np.empty(0, dtype=np.float64)
         for region in range(n_regions):
             start, end, n = (
                 attrs[f"ST_{loop}_{region}"],
diff --git a/src/arpes/endstations/plugin/SSRF_NSRL.py b/src/arpes/endstations/plugin/SSRF_NSRL.py
index fb7fa555..099d5db1 100644
--- a/src/arpes/endstations/plugin/SSRF_NSRL.py
+++ b/src/arpes/endstations/plugin/SSRF_NSRL.py
@@ -44,7 +44,7 @@
 __all__ = ("NSRLEndstation", "SSRFEndstation")
 
 
-def determine_dim(viewer_ini: ConfigParser, dim_name: str) -> tuple[int, NDArray[np.float64], str]:
+def determine_dim(viewer_ini: ConfigParser, dim_name: str) -> tuple[int, NDArray[np.floating], str]:
     """Determine dimension values from the ini file.
 
     Args:
diff --git a/src/arpes/endstations/prodigy_itx.py b/src/arpes/endstations/prodigy_itx.py
index 0c49553d..dbe58742 100644
--- a/src/arpes/endstations/prodigy_itx.py
+++ b/src/arpes/endstations/prodigy_itx.py
@@ -59,7 +59,7 @@ def __init__(self, list_style_itx_data: list[str] | None = None) -> None:
         self.pixels: tuple[int, ...]
         self.axis_info: dict[str, tuple[IgorSetscaleFlag, float, float, str]] = {}
         self.wavename: str = ""
-        self.intensity: NDArray[np.float64]
+        self.intensity: NDArray[np.floating]
 
         if list_style_itx_data is not None:
             self.parse(list_style_itx_data)
@@ -112,7 +112,7 @@ def to_dataarray(
             "spectrum_type": "cut",
             "angle_unit": "deg (theta_y)",
         }
-        coords: dict[str, NDArray[np.float64]] = {}
+        coords: dict[str, NDArray[np.floating]] = {}
         dims: list[str] = []
 
         # set angle axis
@@ -146,7 +146,7 @@ def to_dataarray(
         return data_array
 
     @property
-    def integrated_intensity(self) -> np.float64:
+    def integrated_intensity(self) -> np.floating:
         """Return the integrated intensity."""
         return np.sum(self.intensity)
 
@@ -310,7 +310,7 @@ def _parse_user_comment(
 def _create_coord(
     axis_info: tuple[IgorSetscaleFlag, float, float, str],
     pixels: int,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Create coordinate array from the axis_info."""
     flag, start, delta_or_end, _ = axis_info
     return flag.set_scale(
diff --git a/src/arpes/endstations/prodigy_sp2.py b/src/arpes/endstations/prodigy_sp2.py
index 4eaf109d..4d29199a 100644
--- a/src/arpes/endstations/prodigy_sp2.py
+++ b/src/arpes/endstations/prodigy_sp2.py
@@ -53,7 +53,7 @@ def load_sp2(
     params: dict[str, str | float] = {}
     data: list[float] = []
     pixels: tuple[int, int] = (0, 0)
-    coords: dict[str, NDArray[np.float64]] = {}
+    coords: dict[str, NDArray[np.floating]] = {}
     with Path(path_to_file).open(encoding="Windows-1252") as sp2file:
         for line in sp2file:
             if line.startswith("#"):
diff --git a/src/arpes/example_data/mock.py b/src/arpes/example_data/mock.py
index 9d23512c..92eb6361 100644
--- a/src/arpes/example_data/mock.py
+++ b/src/arpes/example_data/mock.py
@@ -9,12 +9,12 @@
 
 
 def temporal_from_rate(
-    t: float | NDArray[np.float64],
+    t: float | NDArray[np.floating],
     g: float,
     sigma: float,
     k_ex: float,
     t0: float = 0,
-) -> float | NDArray[np.float64]:
+) -> float | NDArray[np.floating]:
     """Temporal profile.
From a rate equation, which is used in (for example) diff --git a/src/arpes/fits/fit_models/bands.py b/src/arpes/fits/fit_models/bands.py index 13bb4cd9..cf26e065 100644 --- a/src/arpes/fits/fit_models/bands.py +++ b/src/arpes/fits/fit_models/bands.py @@ -24,11 +24,11 @@ class ParabolicDispersionPhiModel(Model): def parabolic_band_dispersion_phi( self, - x: NDArray[np.float64], + x: NDArray[np.floating], effective_mass: float, phi_offset: float, energy_offset: float, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Return the energy at the emission angle under the free electron band model.""" return energy_offset * effective_mass / (effective_mass - np.sin((x - phi_offset) ** 2)) @@ -43,8 +43,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None: def guess( self, - data: NDArray[np.float64] | XrTypes, - x: NDArray[np.float64] | xr.DataArray, + data: NDArray[np.floating] | XrTypes, + x: NDArray[np.floating] | xr.DataArray, **kwargs: float, ) -> lf.Parameters: """Estimate initial model parameter values from data.""" diff --git a/src/arpes/fits/fit_models/decay.py b/src/arpes/fits/fit_models/decay.py index 3339d0fd..4f5c189f 100644 --- a/src/arpes/fits/fit_models/decay.py +++ b/src/arpes/fits/fit_models/decay.py @@ -24,12 +24,12 @@ class ExponentialDecayCModel(Model): @staticmethod def exponential_decay_c( - x: NDArray[np.float64], + x: NDArray[np.floating], amp: float, tau: float, t0: float, const_bkg: float, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Represents an exponential decay after a point (delta) impulse. This coarsely models the dynamics after excitation in a @@ -63,8 +63,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None: def guess( self, - data: NDArray[np.float64] | XrTypes, - x: NDArray[np.float64] | xr.DataArray, + data: NDArray[np.floating] | XrTypes, + x: NDArray[np.floating] | xr.DataArray, **kwargs: float, ) -> lf.Parameters: """Estimate initial model parameter values from data.""" @@ -92,13 +92,13 @@ class TwoExponentialDecayCModel(Model): @staticmethod def twoexponential_decay_c( # noqa: PLR0913 - x: NDArray[np.float64], + x: NDArray[np.floating], amp: float, t0: float, tau1: float, tau2: float, const_bkg: float, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Like `exponential_decay_c`, except with two timescales. This is meant to model if two different quasiparticle decay channels are allowed, @@ -126,8 +126,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None: def guess( self, - data: NDArray[np.float64] | xr.DataArray, - x: NDArray[np.float64] | xr.DataArray, + data: NDArray[np.floating] | xr.DataArray, + x: NDArray[np.floating] | xr.DataArray, **kwargs: float, ) -> lf.Parameters: """Placeholder for making better heuristic guesses here.""" diff --git a/src/arpes/fits/fit_models/dirac.py b/src/arpes/fits/fit_models/dirac.py index 69ab7892..3f5eb3ea 100644 --- a/src/arpes/fits/fit_models/dirac.py +++ b/src/arpes/fits/fit_models/dirac.py @@ -25,14 +25,14 @@ class DiracDispersionModel(Model): def dirac_dispersion( # noqa: PLR0913 self, - x: NDArray[np.float64], + x: NDArray[np.floating], kd: float = 1.6, amplitude_1: float = 1, amplitude_2: float = 1, center: float = 0, sigma_1: float = 1, sigma_2: float = 1, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Model for dirac_dispersion symmetric about the dirac point. 
Fits Lorentzians to (kd-center) and (kd+center)
@@ -73,8 +73,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
 
     def guess(
         self,
-        data: NDArray[np.float64] | XrTypes,
-        x: NDArray[np.float64] | xr.DataArray,
+        data: NDArray[np.floating] | XrTypes,
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: float,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
diff --git a/src/arpes/fits/fit_models/fermi_edge.py b/src/arpes/fits/fit_models/fermi_edge.py
index 84362100..1bb769d2 100644
--- a/src/arpes/fits/fit_models/fermi_edge.py
+++ b/src/arpes/fits/fit_models/fermi_edge.py
@@ -75,8 +75,8 @@ def __init__(
 
     def guess(
         self,
-        data: XrTypes | NDArray[np.float64],
-        x: NDArray[np.float64] | xr.DataArray,
+        data: XrTypes | NDArray[np.floating],
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: float,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
@@ -133,8 +133,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
 
     def guess(
         self,
-        data: XrTypes | NDArray[np.float64],
-        x: NDArray[np.float64] | xr.DataArray,
+        data: XrTypes | NDArray[np.floating],
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: Incomplete,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
@@ -176,8 +176,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
 
     def guess(
         self,
-        data: NDArray[np.float64] | XrTypes,
-        x: NDArray[np.float64] | xr.DataArray,
+        data: NDArray[np.floating] | XrTypes,
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: Incomplete,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
@@ -224,7 +224,7 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
     def guess(
         self,
         data: XrTypes,
-        x: NDArray[np.float64] | xr.DataArray,
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: Incomplete,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
@@ -261,7 +261,7 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
     def guess(
         self,
         data: XrTypes,
-        x: NDArray[np.float64] | xr.DataArray,
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: float,
     ) -> lf.Parameters:
         """Placeholder for making better heuristic guesses here.
@@ -296,14 +296,14 @@ class BandEdgeBGModel(Model):
 
     @staticmethod
     def band_edge_bkg_gauss(  # noqa: PLR0913
-        x: NDArray[np.float64],
+        x: NDArray[np.floating],
         width: float = 0.05,
         amplitude: float = 1,
         gamma: float = 0.1,
         lor_center: float = 0,
         lin_slope: float = 0,
         const_bkg: float = 0,
-    ) -> NDArray[np.float64]:
+    ) -> NDArray[np.floating]:
         """Fitting model for Lorentzian and background multiplied into Fermi-Dirac distribution."""
         return np.convolve(
             np.asarray(
@@ -341,7 +341,7 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
     def guess(
         self,
         data: XrTypes,
-        x: NDArray[np.float64] | None = None,
+        x: NDArray[np.floating] | None = None,
         **kwargs: float,
     ) -> lf.Parameters:
         """Placeholder for making better heuristic guesses here.
@@ -382,12 +382,12 @@ class GStepBStandardModel(Model):
 
     @staticmethod
     def gstepb_standard(
-        x: NDArray[np.float64],
+        x: NDArray[np.floating],
         center: float = 0,
         sigma: float = 1,
         amplitude: float = 1,
         **kwargs: Incomplete,
-    ) -> NDArray[np.float64]:
+    ) -> NDArray[np.floating]:
         """Specializes parameters in gstepb."""
         return gstepb(x, center, width=sigma, erf_amp=amplitude, **kwargs)
 
@@ -406,7 +406,7 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None:
     def guess(
         self,
         data: XrTypes,
-        x: NDArray[np.float64] | xr.DataArray,
+        x: NDArray[np.floating] | xr.DataArray,
         **kwargs: Incomplete,
     ) -> lf.Parameters:
         """Estimate initial model parameter values from data."""
diff --git a/src/arpes/fits/fit_models/functional_forms.py b/src/arpes/fits/fit_models/functional_forms.py
index 62ea731b..c75489d7 100644
--- a/src/arpes/fits/fit_models/functional_forms.py
+++ b/src/arpes/fits/fit_models/functional_forms.py
@@ -26,11 +26,11 @@
 
 
 def fermi_dirac(
-    x: NDArray[np.float64],
+    x: NDArray[np.floating],
     center: float = 0,
     width: float = 0.05,
     scale: float = 1,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     r"""Fermi edge, with somewhat arbitrary normalization.
 
     :math:`\frac{scale}{\exp\left(\frac{x-center}{width}\right) + 1}`
@@ -41,13 +41,13 @@ def fermi_dirac(
 
 
 def affine_broadened_fd(  # noqa: PLR0913
-    x: NDArray[np.float64],
+    x: NDArray[np.floating],
     center: float = 0,
     width: float = 0.003,
     sigma: float = 0.02,
     const_bkg: float = 1,
     lin_slope: float = 0,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Fermi function convolved with a Gaussian together with an affine background.
 
     Args:
@@ -75,13 +75,13 @@ def affine_broadened_fd(  # noqa: PLR0913
 
 
 def gstepb(  # noqa: PLR0913
-    x: NDArray[np.float64],
+    x: NDArray[np.floating],
     center: float = 0,
     width: float = 1,
     erf_amp: float = 1,
     lin_slope: float = 0,
     const_bkg: float = 0,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Complementary error function as an approximation of the Fermi function convolved with a Gaussian.
 
     This accurately represents low temperature steps where thermal broadening is
@@ -102,11 +102,11 @@ def gstepb(  # noqa: PLR0913
 
 
 def gstep(
-    x: NDArray[np.float64],
+    x: NDArray[np.floating],
     center: float = 0,
     width: float = 1,
     erf_amp: float = 1,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     r"""Fermi function convolved with a Gaussian.
 
     :math:`\frac{erf\_amp}{2} \times \mathrm{erfc}\left(\frac{(x-center)}{w}\right)
@@ -124,7 +124,7 @@ def gstep(
 
 
 def band_edge_bkg(  # noqa: PLR0913
-    x: NDArray[np.float64],
+    x: NDArray[np.floating],
     center: float = 0,
     width: float = 0.05,
     amplitude: float = 1,
@@ -132,7 +132,7 @@ def band_edge_bkg(  # noqa: PLR0913
     lor_center: float = 0,
     lin_slope: float = 0,
     const_bkg: float = 0,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Lorentzian plus affine background multiplied into Fermi edge with overall offset.
 
     Todo: Reconsidering the Need.
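A quick numerical sanity check of the documented form of fermi_dirac above
(re-implemented inline rather than imported, so this is only a sketch): the
step should sit at half of `scale` exactly at `center`, for any floating
dtype:

    import numpy as np

    x = np.linspace(-0.2, 0.2, 5, dtype=np.float32)
    fd = 1.0 / (np.exp((x - 0.0) / 0.05) + 1.0)  # center=0, width=0.05, scale=1
    assert np.isclose(fd[2], 0.5)      # half occupation at the step center
    assert fd[0] > 0.9 > 0.1 > fd[-1]  # filled below the edge, empty above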
@@ -147,18 +147,18 @@ def band_edge_bkg( # noqa: PLR0913 def fermi_dirac_affine( - x: NDArray[np.float64], + x: NDArray[np.floating], center: float = 0, width: float = 0.05, lin_slope: float = 0, const_bkg: float = 1, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Fermi step edge with a linear background above the Fermi level.""" return (const_bkg + lin_slope * (x - center)) * fermi_dirac(x=x, center=center, width=width) def gstepb_mult_lorentzian( # noqa: PLR0913 - x: NDArray[np.float64], + x: NDArray[np.floating], center: float = 0, width: float = 1, erf_amp: float = 1, @@ -166,7 +166,7 @@ def gstepb_mult_lorentzian( # noqa: PLR0913 const_bkg: float = 0, gamma: float = 1, lorcenter: float = 0, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """A Lorentzian multiplied by a gstepb background.""" return gstepb(x, center, width, erf_amp, lin_slope, const_bkg) * lorentzian( x=x, diff --git a/src/arpes/fits/fit_models/misc.py b/src/arpes/fits/fit_models/misc.py index ccde1302..232291d1 100644 --- a/src/arpes/fits/fit_models/misc.py +++ b/src/arpes/fits/fit_models/misc.py @@ -27,12 +27,12 @@ class FermiVelocityRenormalizationModel(Model): @staticmethod def fermi_velocity_renormalization_mfl( - x: NDArray[np.float64], + x: NDArray[np.floating], n0: float, v0: float, alpha: float, eps: float, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """A model for Logarithmic Renormalization to Fermi Velocity in Dirac Materials. Args: @@ -57,8 +57,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None: def guess( self, - data: XrTypes | NDArray[np.float64], - x: NDArray[np.float64] | xr.DataArray, + data: XrTypes | NDArray[np.floating], + x: NDArray[np.floating] | xr.DataArray, **kwargs: float, ) -> lf.Parameters: """Placeholder for parameter estimation.""" @@ -79,13 +79,13 @@ class LogRenormalizationModel(Model): @staticmethod def log_renormalization( # noqa: PLR0913 - x: NDArray[np.float64], + x: NDArray[np.floating], kF: float = 1.6, # noqa: N803 kD: float = 1.6, # noqa: N803 kC: float = 1.7, # noqa: N803 alpha: float = 0.4, vF: float = 1e6, # noqa: N803 - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Logarithmic correction to linear dispersion near charge neutrality in Dirac materials. 
As examples, this can be used to study the low energy physics in high quality ARPES spectra @@ -119,8 +119,8 @@ def __init__(self, **kwargs: Unpack[ModelArgs]) -> None: def guess( self, - data: XrTypes | NDArray[np.float64], - x: NDArray[np.float64] | xr.DataArray, + data: XrTypes | NDArray[np.floating], + x: NDArray[np.floating] | xr.DataArray, **kwargs: float, ) -> lf.Parameters: """Placeholder for actually making parameter estimates here.""" diff --git a/src/arpes/fits/fit_models/two_dimensional.py b/src/arpes/fits/fit_models/two_dimensional.py index 4e722f59..c7a56a49 100644 --- a/src/arpes/fits/fit_models/two_dimensional.py +++ b/src/arpes/fits/fit_models/two_dimensional.py @@ -30,8 +30,8 @@ class Gaussian2DModel(Model): @staticmethod def gaussian_2d_bkg( # noqa: PLR0913 - x: NDArray[np.float64] | xr.DataArray, - y: NDArray[np.float64] | xr.DataArray, + x: NDArray[np.floating] | xr.DataArray, + y: NDArray[np.floating] | xr.DataArray, amplitude: float = 1, xc: float = 0, yc: float = 0, @@ -40,7 +40,7 @@ def gaussian_2d_bkg( # noqa: PLR0913 const_bkg: float = 0, x_bkg: float = 0, y_bkg: float = 0, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Defines a multidimensional axis aligned normal.""" bkg = np.outer(x * 0 + 1, y_bkg * y) + np.outer(x * x_bkg, y * 0 + 1) + const_bkg # make the 2D Gaussian matrix @@ -89,8 +89,8 @@ class EffectiveMassModel(Model): @staticmethod def effective_mass_bkg( # noqa: PLR0913 - eV: NDArray[np.float64], # noqa: N803 - kp: NDArray[np.float64], + eV: NDArray[np.floating], # noqa: N803 + kp: NDArray[np.floating], m_star: float = 0, k_center: float = 0, eV_center: float = 0, # noqa: N803 @@ -100,7 +100,7 @@ def effective_mass_bkg( # noqa: PLR0913 const_bkg: float = 0, k_bkg: float = 0, eV_bkg: float = 0, # noqa: N803 - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Model implementation function for simultaneous 2D curve fitting of band effective mass. 
Allows for an affine background in each dimension, together with variance in the band diff --git a/src/arpes/models/band.py b/src/arpes/models/band.py index bab437ec..ba668090 100644 --- a/src/arpes/models/band.py +++ b/src/arpes/models/band.py @@ -60,8 +60,8 @@ def velocity(self) -> xr.DataArray: sigma: float = 0.1 / spacing raw_values = self.embed_nan(self.center.values, 50) - masked: NDArray[np.float64] = np.nan_to_num(np.copy(raw_values), nan=0.0) - nan_mask: NDArray[np.float64] = np.nan_to_num(np.copy(raw_values) * 0 + 1, nan=0.0) + masked: NDArray[np.floating] = np.nan_to_num(np.copy(raw_values), nan=0.0) + nan_mask: NDArray[np.floating] = np.nan_to_num(np.copy(raw_values) * 0 + 1, nan=0.0) nan_mask = scipy.ndimage.gaussian_filter(nan_mask, sigma, mode="mirror") masked = scipy.ndimage.gaussian_filter(masked, sigma, mode="mirror") @@ -100,7 +100,7 @@ def get_dataarray( var_name: str, # Literal["center", "amplitude", "sigma""] *, clean: bool = True, - ) -> xr.DataArray | NDArray[np.float64]: + ) -> xr.DataArray | NDArray[np.floating]: """Converts the underlying data into an array representation.""" assert isinstance(self._data, xr.Dataset) if not clean: @@ -123,7 +123,7 @@ def center(self) -> xr.DataArray: return center_array @property - def center_stderr(self) -> NDArray[np.float64]: + def center_stderr(self) -> NDArray[np.floating]: """Gets the peak location stderr along the band.""" center_stderr = self.get_dataarray("center_stderr", clean=False) assert isinstance(center_stderr, np.ndarray) @@ -162,17 +162,17 @@ def dims(self) -> tuple[str, ...]: return self._data.center.dims @staticmethod - def embed_nan(values: NDArray[np.float64], padding: int) -> NDArray[np.float64]: + def embed_nan(values: NDArray[np.floating], padding: int) -> NDArray[np.floating]: """Return np.ndarray padding before and after the original NDArray with nan. Args: values: [TODO:description] padding: the length of the padding - Returns: NDArray[np.float64] + Returns: NDArray[np.floating] [TODO:description] """ - embedded: NDArray[np.float64] = np.full( + embedded: NDArray[np.floating] = np.full( shape=(values.shape[0] + 2 * padding,), fill_value=np.nan, dtype=np.float64, diff --git a/src/arpes/plotting/coordinates.py b/src/arpes/plotting/coordinates.py index 84f22f09..c60766f8 100644 --- a/src/arpes/plotting/coordinates.py +++ b/src/arpes/plotting/coordinates.py @@ -19,7 +19,7 @@ def remap_coords_to( arr: DataType, reference_arr: DataType, -) -> dict[str, NDArray[np.float64] | xr.DataArray]: +) -> dict[str, NDArray[np.floating] | xr.DataArray]: """Produces coordinates for the scan path of `arr` in the coordinate system of `reference_arr`. 
This needs to be thought out a bit more, namely to take into account better the
diff --git a/src/arpes/plotting/dispersion.py b/src/arpes/plotting/dispersion.py
index f2e027ec..2ff1bbad 100644
--- a/src/arpes/plotting/dispersion.py
+++ b/src/arpes/plotting/dispersion.py
@@ -112,21 +112,21 @@ def cut_dispersion_plot(  # noqa: PLR0913, PLR0915
     # type: ignore[arg-type]
     lower_part = data.sel(eV=slice(None, 0))
     floor = lower_part.S.fat_sel(eV=e_floor)
 
-    bz_mask: NDArray[np.float64] = bz.reduced_bz_mask(data=lower_part, scale_zone=True)
-    left_mask: NDArray[np.float64] = bz.reduced_bz_E_mask(
+    bz_mask: NDArray[np.floating] = bz.reduced_bz_mask(data=lower_part, scale_zone=True)
+    left_mask: NDArray[np.floating] = bz.reduced_bz_E_mask(
         data=lower_part,
         symbol="X",
         e_cut=e_floor,
         scale_zone=True,
     )
-    right_mask: NDArray[np.float64] = bz.reduced_bz_E_mask(
+    right_mask: NDArray[np.floating] = bz.reduced_bz_E_mask(
         data=lower_part,
         symbol="Y",
         e_cut=e_floor,
         scale_zone=True,
     )
 
-    def mask_for(x: NDArray[np.float64]) -> NDArray[np.float64]:
+    def mask_for(x: NDArray[np.floating]) -> NDArray[np.floating]:
         return left_mask if x.shape == left_mask.shape else right_mask
 
     x_dim, y_dim, z_dim = tuple(new_dim_order)
diff --git a/src/arpes/plotting/false_color.py b/src/arpes/plotting/false_color.py
index eeda1679..a40864ba 100644
--- a/src/arpes/plotting/false_color.py
+++ b/src/arpes/plotting/false_color.py
@@ -55,7 +55,7 @@ def false_color_plot(  # noqa: PLR0913
     fig, ax = plt.subplots(figsize=figsize)
     assert isinstance(ax, Axes)
 
-    def normalize_channel(channel: NDArray[np.float64]) -> NDArray[np.float64]:
+    def normalize_channel(channel: NDArray[np.floating]) -> NDArray[np.float64]:
         channel -= np.percentile(channel, 100 * pmin)
         channel[channel > np.percentile(channel, 100 * pmax)] = np.percentile(channel, 100 * pmax)
         return channel / np.max(channel)
diff --git a/src/arpes/plotting/fermi_surface.py b/src/arpes/plotting/fermi_surface.py
index 0bfae421..7a8474b9 100644
--- a/src/arpes/plotting/fermi_surface.py
+++ b/src/arpes/plotting/fermi_surface.py
@@ -74,7 +74,7 @@ def fermi_surface_slices(
 @save_plot_provenance
 def magnify_circular_regions_plot(  # noqa: PLR0913
     data: xr.DataArray,
-    magnified_points: NDArray[np.float64] | list[float],
+    magnified_points: NDArray[np.floating] | list[float],
     mag: float = 10,
     radius: float = 0.05,
     # the two below can be treated as kwargs?
diff --git a/src/arpes/plotting/movie.py b/src/arpes/plotting/movie.py
index a9b55d87..e9993349 100644
--- a/src/arpes/plotting/movie.py
+++ b/src/arpes/plotting/movie.py
@@ -390,27 +390,27 @@ def update(frame: int) -> Iterable[Artist]:
     )
 
 
-def _replace_after_col(array: NDArray[np.float64], col_num: int) -> NDArray[np.float64]:
+def _replace_after_col(array: NDArray[np.floating], col_num: int) -> NDArray[np.floating]:
     """Replace elements in the array with NaN after a specified column.
 
     Args:
-        array (NDArray[np.float64): The input array.
+        array (NDArray[np.floating]): The input array.
         col_num (int): The column number after which elements will be replaced with NaN.
 
     Returns:
-        NDArray[np.float64]: The modified array with NaN values after the specified column.
+        NDArray[np.floating]: The modified array with NaN values after the specified column.
""" return np.where(np.arange(array.shape[1])[:, None] >= col_num, np.nan, array.T).T -def _replace_after_row(array: NDArray[np.float64], row_num: int) -> NDArray[np.float64]: +def _replace_after_row(array: NDArray[np.floating], row_num: int) -> NDArray[np.floating]: """Replace elements in the array with NaN after a specified row. Args: - array (NDArray[np.float64]): The input array. + array (NDArray[np.floating]): The input array. row_num (int): The row number after which elements will be replaced with NaN. Returns: - NDArray[np.float64]: The modified array with NaN values after the specified row. + NDArray[np.floating]: The modified array with NaN values after the specified row. """ return np.where(np.arange(array.shape[0])[:, None] >= row_num, np.nan, array) diff --git a/src/arpes/plotting/parameter.py b/src/arpes/plotting/parameter.py index 6dae4417..296950b4 100644 --- a/src/arpes/plotting/parameter.py +++ b/src/arpes/plotting/parameter.py @@ -56,7 +56,7 @@ def plot_parameter( # noqa: PLR0913 ds = fit_data.F.param_as_dataset(param_name) x_name = ds.value.dims[0] - x: NDArray[np.float64] = ds.coords[x_name].values + x: NDArray[np.floating] = ds.coords[x_name].values kwargs.setdefault("fillstyle", "none") kwargs.setdefault("markersize", 8) kwargs.setdefault("color", "#1f77b4") # matplotlib.colors.TABLEAU_COLORS["tab:blue"] diff --git a/src/arpes/plotting/spin.py b/src/arpes/plotting/spin.py index 32e63024..6253705c 100644 --- a/src/arpes/plotting/spin.py +++ b/src/arpes/plotting/spin.py @@ -252,7 +252,7 @@ def polarization_intensity_to_color( data: xr.Dataset, vmax: float = 0, pmax: float = 1, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Converts a dataset with intensity and polarization into a RGB colorarray. This consists of a few steps: diff --git a/src/arpes/plotting/stack_plot.py b/src/arpes/plotting/stack_plot.py index d539c20c..297f5b89 100644 --- a/src/arpes/plotting/stack_plot.py +++ b/src/arpes/plotting/stack_plot.py @@ -593,7 +593,7 @@ def stack_dispersion_plot( # noqa: PLR0913 # pragma: no cover ) max_intensity_over_stacks = np.nanmax(data_arr.values) - cvalues: NDArray[np.float64] = data_arr.coords[other_axis].values + cvalues: NDArray[np.floating] = data_arr.coords[other_axis].values if not scale_factor: scale_factor = _scale_factor( @@ -682,9 +682,9 @@ def stack_dispersion_plot( # noqa: PLR0913 # pragma: no cover def _y_shifted( offset_correction: Literal["zero", "constant", "constant_right"] | None, marginal: xr.DataArray, - coord_value: NDArray[np.float64], + coord_value: NDArray[np.floating], scale_parameters: tuple[float, float, bool], -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: scale_factor = scale_parameters[0] max_intensity_over_stacks = scale_parameters[1] negate = scale_parameters[2] diff --git a/src/arpes/plotting/utils.py b/src/arpes/plotting/utils.py index 2b730a44..39d85304 100644 --- a/src/arpes/plotting/utils.py +++ b/src/arpes/plotting/utils.py @@ -131,8 +131,8 @@ def mod_plot_to_ax( assert isinstance(data_arr, xr.DataArray) assert isinstance(ax, Axes) with unchanged_limits(ax): - xs: NDArray[np.float64] = data_arr.coords[data_arr.dims[0]].values - ys: NDArray[np.float64] = mod.eval(x=xs) + xs: NDArray[np.floating] = data_arr.coords[data_arr.dims[0]].values + ys: NDArray[np.floating] = mod.eval(x=xs) ax.plot(xs, ys, **kwargs) @@ -213,7 +213,7 @@ def color_for_darkbackground(obj: Colorbar | Axes) -> None: def data_to_axis_units( points: tuple[float, float], ax: Axes | None = None, -) -> NDArray[np.float64]: +) -> 
NDArray[np.floating]:
     """Converts from data coordinates to axis coordinates (figure pixels)."""
     if ax is None:
         ax = plt.gca()
@@ -224,7 +224,7 @@ def data_to_axis_units(
 def axis_to_data_units(
     points: tuple[float, float],
     ax: Axes | None = None,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Converts from axis coordinates to data coordinates."""
     if ax is None:
         ax = plt.gca()
@@ -234,7 +234,7 @@ def axis_to_data_units(
 
 def ddata_daxis_units(
     ax: Axes | None = None,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Gives the derivative of data units with respect to axis units."""
     if ax is None:
         ax = plt.gca()
@@ -246,7 +246,7 @@ def ddata_daxis_units(
 
 def daxis_ddata_units(
     ax: Axes | None = None,
-) -> NDArray[np.float64]:
+) -> NDArray[np.floating]:
     """Gives the derivative of axis units with respect to data units."""
     if ax is None:
         ax = plt.gca()
@@ -688,7 +688,7 @@ def insert_cut_locator(
 
     n = 200
 
-    def resolve(name: Hashable, value: slice | int) -> NDArray[np.float64]:
+    def resolve(name: Hashable, value: slice | int) -> NDArray[np.floating]:
         if isinstance(value, slice):
             low = value.start
             high = value.stop
diff --git a/src/arpes/preparation/axis.py b/src/arpes/preparation/axis.py
index 313eb0df..4342d625 100644
--- a/src/arpes/preparation/axis.py
+++ b/src/arpes/preparation/axis.py
@@ -248,13 +248,13 @@ def __call__(
         *args: Any,
         axis: int,
         **kwargs: Any,
-    ) -> NDArray[np.float64]: ...
+    ) -> NDArray[np.floating]: ...
 
 
 def transform_dataarray_axis(  # noqa: PLR0913
     func: AxisCallable,
     old_and_new_axis_names: tuple[str, str],
-    new_axis: NDArray[np.float64] | xr.DataArray,
+    new_axis: NDArray[np.floating] | xr.DataArray,
     dataset: xr.Dataset,
     prep_name: Callable[[str], str],
     *,
@@ -265,7 +265,7 @@ def transform_dataarray_axis(  # noqa: PLR0913
 
     Args:
         func (Callable): The function to apply to the axis of the DataArray
         old_and_new_axis_names (tuple[str, str]): Tuple containing the old and new axis names
-        new_axis (NDArray[np.float64] | xr.DataArray): Values for the new axis
+        new_axis (NDArray[np.floating] | xr.DataArray): Values for the new axis
         dataset (xr.Dataset): The dataset to transform
         prep_name (Callable): Function to prepare the name for the transformed DataArrays
         transform_spectra (dict[str, xr.DataArray] | None): Dictionary of spectra to transform
diff --git a/src/arpes/preparation/tof.py b/src/arpes/preparation/tof.py
index d56f6662..418dc035 100644
--- a/src/arpes/preparation/tof.py
+++ b/src/arpes/preparation/tof.py
@@ -28,7 +28,7 @@
 @update_provenance("Convert ToF data from timing signal to kinetic energy")
 def convert_to_kinetic_energy(
     dataarray: xr.DataArray,
-    kinetic_energy_axis: NDArray[np.float64],
+    kinetic_energy_axis: NDArray[np.floating],
 ) -> xr.DataArray:
     """Convert the ToF timing information into an energy histogram.
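A minimal sketch of calling the converter above; `tof_dataarray` is an
assumed ToF-mode xr.DataArray with a "time" coordinate, and the energy grid
is illustrative. Any floating dtype satisfies the new annotation:

    import numpy as np

    kinetic_energy_axis = np.linspace(10.0, 20.0, 2001, dtype=np.float32)
    # converted = convert_to_kinetic_energy(tof_dataarray, kinetic_energy_axis)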
@@ -51,7 +51,7 @@ def convert_to_kinetic_energy( dataarray = dataarray.transpose(*new_dim_order) new_dim_order[0] = "eV" - timing: NDArray[np.float64] = dataarray.coords["time"].values + timing: NDArray[np.floating] = dataarray.coords["time"].values assert timing[1] > timing[0] t_min, t_max = timing.min().item(), timing.max().item() @@ -101,7 +101,7 @@ def energy_to_time(conv: float, energy: float) -> np.float64: def build_KE_coords_to_time_pixel_coords( dataset: xr.Dataset, - interpolation_axis: NDArray[np.float64], + interpolation_axis: NDArray[np.floating], ) -> Callable[..., tuple[xr.DataArray]]: """Constructs a coordinate conversion function from kinetic energy to time pixels.""" conv = ( @@ -143,7 +143,7 @@ def KE_coords_to_time_pixel_coords( def build_KE_coords_to_time_coords( dataset: xr.Dataset, - interpolation_axis: NDArray[np.float64], + interpolation_axis: NDArray[np.floating], ) -> Callable[..., tuple[xr.DataArray]]: """Constructs a coordinate conversion function from kinetic energy to time coords. diff --git a/src/arpes/provenance.py b/src/arpes/provenance.py index 68d508b7..b681134f 100644 --- a/src/arpes/provenance.py +++ b/src/arpes/provenance.py @@ -81,7 +81,7 @@ class Provenance(_Provenance, total=False): sigma: dict[Hashable, float] # analysis.filters size: dict[Hashable, float] # analysis.filters use_pixel: bool # analysis.filters - correction: list[NDArray[np.float64]] # fermi_edge_correction + correction: list[NDArray[np.floating]] # fermi_edge_correction dims: Sequence[str | Hashable] dim: str old_axis: str diff --git a/src/arpes/simulation.py b/src/arpes/simulation.py index b4f175e7..a28caa65 100644 --- a/src/arpes/simulation.py +++ b/src/arpes/simulation.py @@ -152,9 +152,9 @@ class WindowedDetectorEffect(DetectorEffect): def cloud_to_arr( - point_cloud: list[list[float]] | Iterable[NDArray[np.float64]], + point_cloud: list[list[float]] | Iterable[NDArray[np.floating]], shape: tuple[int, int], -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Converts a point cloud (list of xy pairs) to an array representation. Uses linear interpolation for points that have non-integral coordinates. @@ -189,10 +189,10 @@ def cloud_to_arr( def apply_psf_to_point_cloud( - point_cloud: list[list[float]] | Iterable[NDArray[np.float64]], + point_cloud: list[list[float]] | Iterable[NDArray[np.floating]], shape: tuple[int, int], sigma: tuple[int, int] = (10, 3), # Note: Pixel units -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Takes a point cloud and turns it into a broadened spectrum. Samples are drawn individually and smeared by a @@ -222,7 +222,7 @@ def apply_psf_to_point_cloud( def sample_from_distribution( distribution: xr.DataArray, n: int = 5000, -) -> tuple[NDArray[np.float64], NDArray[np.float64]]: +) -> tuple[NDArray[np.floating], NDArray[np.floating]]: """Samples events from a probability distribution. Given a probability distribution in ND modeled by an array providing the PDF, @@ -278,8 +278,8 @@ def digest_to_json(self) -> dict[str, Any]: def __init__( self, - k: NDArray[np.float64] | None = None, - omega: NDArray[np.float64] | None = None, + k: NDArray[np.floating] | None = None, + omega: NDArray[np.floating] | None = None, temperature: float = 20, ) -> None: """Initialize from parameters. 
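The design pattern in this simulation module is that subclasses model only the imaginary part of the self energy; the real part is recovered through the Kramers-Kronig relation via scipy's analytic-signal Hilbert transform, mirroring the np.imag(sig.hilbert(...)) call above. A self-contained sketch of that step, using a toy Lorentzian Im(Sigma) as a stand-in input (not one of the models in this file):

    import numpy as np
    import scipy.signal as sig

    omega = np.linspace(-1.0, 1.0, 2001)
    imag_self_energy = 0.1 / (omega**2 + 0.1**2)  # toy Lorentzian broadening

    # The analytic signal is x + i*H(x), so its imaginary part is the
    # Hilbert transform of Im(Sigma), i.e. the Kramers-Kronig partner.
    real_self_energy = np.imag(sig.hilbert(imag_self_energy))

    self_energy = real_self_energy + 1.0j * imag_self_energy
    print(self_energy[1000])  # complex self energy at omega = 0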
@@ -300,23 +300,23 @@ def __init__( self.temperature = temperature self.omega = omega - self.k: NDArray[np.float64] = k + self.k: NDArray[np.floating] = k - def imag_self_energy(self) -> NDArray[np.float64]: + def imag_self_energy(self) -> NDArray[np.floating]: """Provides the imaginary part of the self energy.""" return np.zeros( shape=self.omega.shape, ) - def real_self_energy(self) -> NDArray[np.float64]: + def real_self_energy(self) -> NDArray[np.floating]: """Defaults to using Kramers-Kronig from the imaginary self energy.""" return np.imag(sig.hilbert(self.imag_self_energy())) - def self_energy(self) -> NDArray[np.complex128]: + def self_energy(self) -> NDArray[np.complexfloating]: """Combines the self energy terms into a complex valued array.""" return self.real_self_energy() + 1.0j * self.imag_self_energy() - def bare_band(self) -> NDArray[np.float64]: + def bare_band(self) -> NDArray[np.floating]: """Provides the bare band dispersion.""" return 3 * self.k @@ -408,8 +408,8 @@ def digest_to_json(self) -> dict[str, Any]: def __init__( self, - k: NDArray[np.float64] | None = None, - omega: NDArray[np.float64] | None = None, + k: NDArray[np.floating] | None = None, + omega: NDArray[np.floating] | None = None, temperature: float = 20, mfl_parameter: tuple[float, float] = (10.0, 1.0), ) -> None: @@ -426,7 +426,7 @@ def __init__( self.a, self.b = mfl_parameter - def imag_self_energy(self) -> NDArray[np.float64]: + def imag_self_energy(self) -> NDArray[np.floating]: """Calculates the imaginary part of the self energy.""" return np.sqrt((self.a + self.b * self.omega) ** 2 + self.temperature**2) @@ -440,8 +440,8 @@ class SpectralFunctionBSSCO(SpectralFunction): def __init__( self, - k: NDArray[np.float64] | None = None, - omega: NDArray[np.float64] | None = None, + k: NDArray[np.floating] | None = None, + omega: NDArray[np.floating] | None = None, temperature: float = 20, gap_parameters: tuple[float, float, float] = (50, 30, 0), ) -> None: @@ -467,7 +467,7 @@ def digest_to_json(self) -> dict[str, Any]: "gamma_p": self.gamma_p, } - def self_energy(self) -> NDArray[np.complex128]: + def self_energy(self) -> NDArray[np.complexfloating]: """Calculates the self energy.""" shape = (len(self.omega), len(self.k)) @@ -505,7 +505,7 @@ def spectral_function(self) -> xr.DataArray: class SpectralFunctionPhaseCoherent(SpectralFunctionBSSCO): """Implements the "phase coherence" model for the BSSCO spectral function.""" - def self_energy(self) -> NDArray[np.complex128]: + def self_energy(self) -> NDArray[np.complexfloating]: """Calculates the self energy using the phase coherent BSSCO model.""" shape = (len(self.omega), len(self.k)) diff --git a/src/arpes/utilities/bz.py b/src/arpes/utilities/bz.py index aa7e32c7..3aa70d22 100644 --- a/src/arpes/utilities/bz.py +++ b/src/arpes/utilities/bz.py @@ -60,7 +60,7 @@ def process_kpath( path: str, cell: Cell, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Converts paths consisting of point definitions to raw coordinates. Args: @@ -130,7 +130,7 @@ def reduced_bz_axis_to( symbol: str, *, include_E: bool = False, # noqa: N803 -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates a displacement vector to a modded high symmetry point. 
Args: @@ -173,7 +173,7 @@ def reduced_bz_axis_to( raise NotImplementedError -def reduced_bz_axes(data: XrTypes) -> tuple[NDArray[np.float64], NDArray[np.float64]]: +def reduced_bz_axes(data: XrTypes) -> tuple[NDArray[np.floating], NDArray[np.floating]]: """Calculates displacement vectors to high symmetry points in the first Brillouin zone. Args: @@ -247,7 +247,7 @@ def reduced_bz_poly( data: XrTypes, *, scale_zone: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Returns a polynomial representing the reduce first Brillouin zone. Args: @@ -298,7 +298,7 @@ def reduced_bz_E_mask( e_cut: float, *, scale_zone: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates a mask for data which contains points below an energy cutoff. Args: @@ -307,7 +307,7 @@ def reduced_bz_E_mask( e_cut: [TODO:description] scale_zone: [TODO:description] - Returns: NDArray[np.float64] + Returns: NDArray[np.floating] [TODO:description] ToDo: Test @@ -361,7 +361,7 @@ def reduced_bz_E_mask( return np.reshape(mask, sdata.data.shape) -def reduced_bz_mask(data: XrTypes, **kwargs: Incomplete) -> NDArray[np.float64]: +def reduced_bz_mask(data: XrTypes, **kwargs: Incomplete) -> NDArray[np.floating]: """Calculates a mask for the first Brillouin zone of a piece of data. Args: diff --git a/src/arpes/utilities/conversion/api.py b/src/arpes/utilities/conversion/api.py index 3ffa58ab..f9595dfa 100644 --- a/src/arpes/utilities/conversion/api.py +++ b/src/arpes/utilities/conversion/api.py @@ -204,7 +204,7 @@ def convert_to_kspace( # noqa: PLR0913 calibration=calibration, ) - converted_coordinates: dict[Hashable, NDArray[np.float64]] = converter.get_coordinates( + converted_coordinates: dict[Hashable, NDArray[np.floating]] = converter.get_coordinates( resolution=resolution, bounds=bounds, ) diff --git a/src/arpes/utilities/conversion/base.py b/src/arpes/utilities/conversion/base.py index f3ff12bb..6f85b5fc 100644 --- a/src/arpes/utilities/conversion/base.py +++ b/src/arpes/utilities/conversion/base.py @@ -73,7 +73,7 @@ def __init__( self.arr = arr self.dim_order = dim_order self.calibration = calibration - self.phi: NDArray[np.float64] | None = None + self.phi: NDArray[np.floating] | None = None @staticmethod @abstractmethod @@ -114,9 +114,9 @@ def is_slit_vertical(self) -> bool: @staticmethod def kspace_to_BE( - binding_energy: NDArray[np.float64], - *args: NDArray[np.float64], - ) -> NDArray[np.float64]: + binding_energy: NDArray[np.floating], + *args: NDArray[np.floating], + ) -> NDArray[np.floating]: """The energy conservation equation for ARPES. This does not depend on any details of the angular conversion (it's the identity) so we can @@ -130,10 +130,10 @@ def kspace_to_BE( def conversion_for( self, dim: Hashable, - ) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: + ) -> Callable[[NDArray[np.floating]], NDArray[np.floating]]: """Fetches the method responsible for calculating `dim` from momentum coordinates.""" - def identity_transform(self, axis_name: Hashable, *args: Incomplete) -> NDArray[np.float64]: + def identity_transform(self, axis_name: Hashable, *args: Incomplete) -> NDArray[np.floating]: """Just returns the coordinate requested from args. Useful if the transform is the identity. 
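Each converter below implements conversion_for the same way: a small dict dispatch from dimension name to a closure, falling back to identity_transform for pass-through dims such as "eV". A schematic of the pattern with a made-up transform body (the arcsin expression is a toy, not the library's actual kspace_to_phi):

    from collections.abc import Callable, Hashable

    import numpy as np

    def conversion_for(dim: Hashable) -> Callable[..., np.ndarray]:
        def _with_identity(*args: np.ndarray) -> np.ndarray:
            # Pass-through: the requested coordinate is returned unchanged.
            return args[0]

        def kspace_to_phi(binding_energy: np.ndarray, kp: np.ndarray) -> np.ndarray:
            return np.arcsin(kp / (0.5123 * np.sqrt(binding_energy + 20.0)))  # toy

        return {"phi": kspace_to_phi}.get(str(dim), _with_identity)

    print(conversion_for("eV")(np.array([1.0, 2.0])))               # identity
    print(conversion_for("phi")(np.array([0.0]), np.array([0.1])))  # dispatched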
@@ -148,7 +148,7 @@ def get_coordinates( self, resolution: dict[str, float] | None = None, bounds: dict[str, tuple[float, float]] | None = None, - ) -> dict[Hashable, NDArray[np.float64]]: + ) -> dict[Hashable, NDArray[np.floating]]: """Calculates the coordinates which should be used in momentum space. Args: diff --git a/src/arpes/utilities/conversion/bounds_calculations.py b/src/arpes/utilities/conversion/bounds_calculations.py index b7c3487e..01d98d9a 100644 --- a/src/arpes/utilities/conversion/bounds_calculations.py +++ b/src/arpes/utilities/conversion/bounds_calculations.py @@ -21,17 +21,17 @@ def full_angles_to_k( # noqa: PLR0913 - kinetic_energy: NDArray[np.float64] | float, - phi: float | NDArray[np.float64], - psi: float | NDArray[np.float64], - alpha: float | NDArray[np.float64], - beta: float | NDArray[np.float64], - theta: float | NDArray[np.float64], - chi: float | NDArray[np.float64], + kinetic_energy: NDArray[np.floating] | float, + phi: float | NDArray[np.floating], + psi: float | NDArray[np.floating], + alpha: float | NDArray[np.floating], + beta: float | NDArray[np.floating], + theta: float | NDArray[np.floating], + chi: float | NDArray[np.floating], inner_potential: float, ) -> ( tuple[float, float, float] - | tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]] + | tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]] ): """Converts from the full set of standard PyARPES angles to momentum. @@ -100,26 +100,26 @@ def full_angles_to_k( # noqa: PLR0913 def euler_to_kx( - kinetic_energy: NDArray[np.float64], - phi: NDArray[np.float64] | float, - beta: NDArray[np.float64] | float, + kinetic_energy: NDArray[np.floating], + phi: NDArray[np.floating] | float, + beta: NDArray[np.floating] | float, theta: float = 0, *, slit_is_vertical: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates kx from the phi/beta Euler angles given the experimental geometry.""" factor = K_INV_ANGSTROM * np.sqrt(kinetic_energy) return factor * (np.sin(beta) * np.cos(phi) if slit_is_vertical else np.sin(phi + theta)) def euler_to_ky( - kinetic_energy: NDArray[np.float64], - phi: NDArray[np.float64] | float, - beta: NDArray[np.float64] | float, + kinetic_energy: NDArray[np.floating], + phi: NDArray[np.floating] | float, + beta: NDArray[np.floating] | float, theta: float = 0, *, slit_is_vertical: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates ky from the phi/beta Euler angles given the experimental geometry.""" return ( K_INV_ANGSTROM @@ -133,14 +133,14 @@ def euler_to_ky( def euler_to_kz( # noqa: PLR0913 - kinetic_energy: NDArray[np.float64], - phi: NDArray[np.float64] | float, - beta: NDArray[np.float64] | float, + kinetic_energy: NDArray[np.floating], + phi: NDArray[np.floating] | float, + beta: NDArray[np.floating] | float, theta: float = 0, inner_potential: float = 10, *, slit_is_vertical: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """Calculates kz from the phi/beta Euler angles given the experimental geometry.""" beta_term = ( -np.sin(theta) * np.sin(phi) + np.cos(theta) * np.cos(beta) * np.cos(phi) @@ -272,7 +272,7 @@ def calculate_kx_ky_bounds( phi_high: float phi_low, phi_high = phi_coords.min().item(), phi_coords.max().item() phi_mid: float = (phi_high + phi_low) / 2 - sampled_phi_values: NDArray[np.float64] = np.array( + sampled_phi_values: NDArray[np.floating] = np.array( [ phi_high, phi_high, @@ -310,10 +310,10 @@ def calculate_kx_ky_bounds( else 
arr.coords["eV"].max().item() ) # note that the type of the kinetic_energy is float in below. - kxs: NDArray[np.float64] = ( + kxs: NDArray[np.floating] = ( K_INV_ANGSTROM * np.sqrt(max_kinetic_energy) * np.sin(sampled_phi_values) ) - kys: NDArray[np.float64] = ( + kys: NDArray[np.floating] = ( K_INV_ANGSTROM * np.sqrt(max_kinetic_energy) * np.cos(sampled_phi_values) diff --git a/src/arpes/utilities/conversion/calibration.py b/src/arpes/utilities/conversion/calibration.py index 489f48da..08069a19 100644 --- a/src/arpes/utilities/conversion/calibration.py +++ b/src/arpes/utilities/conversion/calibration.py @@ -54,9 +54,9 @@ def __repr__(self) -> str: def correct_detector_angle( self, - eV: NDArray[np.float64], # noqa: N803 - phi: NDArray[np.float64], - ) -> NDArray[np.float64]: + eV: NDArray[np.floating], # noqa: N803 + phi: NDArray[np.floating], + ) -> NDArray[np.floating]: """Applies a calibration to the detector `phi` angle.""" left, right = ( np.interp(x=0, xp=self._left_edge.eV.values, fp=self._left_edge.phi.values), diff --git a/src/arpes/utilities/conversion/coordinates.py b/src/arpes/utilities/conversion/coordinates.py index 612b1267..1fc7802b 100644 --- a/src/arpes/utilities/conversion/coordinates.py +++ b/src/arpes/utilities/conversion/coordinates.py @@ -104,7 +104,7 @@ class CoordinateTransform(TypedDict, total=True): but no specific coordinate system is assumed at this level. - transforms : dict[str, Callable[..., NDArray[np.float64]]] + transforms : dict[str, Callable[..., NDArray[np.floating]]] Mapping from coordinate names to transformation functions. Each callable must accept a sequence of meshed coordinate arrays @@ -125,12 +125,12 @@ class CoordinateTransform(TypedDict, total=True): """ dims: list[str] | list[Hashable] # in most case dims should be Literal["kp", "kx", "ky", "kz"]] - transforms: dict[str, Callable[..., NDArray[np.float64]]] + transforms: dict[str, Callable[..., NDArray[np.floating]]] def convert_coordinates( arr: xr.DataArray, - target_coordinates: dict[Hashable, NDArray[np.float64]], + target_coordinates: dict[Hashable, NDArray[np.floating]], coordinate_transform: CoordinateTransform, *, as_dataset: bool = False, @@ -139,7 +139,7 @@ def convert_coordinates( Args: arr(xr.DataArray): ARPES data - target_coordinates:(dict[Hashable, NDArray[np.float64]]): coorrdinate for ... + target_coordinates:(dict[Hashable, NDArray[np.floating]]): coorrdinate for ... coordinate_transform(dict[str, list[str] | Callable]): coordinat for ... 
as_dataset(bool): if True, return the data as the dataSet @@ -172,7 +172,7 @@ def convert_coordinates( meshed_coordinates = [arr.S.lookup_offset_coord("eV"), *meshed_coordinates] old_coord_names = [str(dim) for dim in arr.dims if dim not in target_coordinates] assert isinstance(coordinate_transform["transforms"], dict) - transforms: dict[str, Callable[..., NDArray[np.float64]]] = coordinate_transform["transforms"] + transforms: dict[str, Callable[..., NDArray[np.floating]]] = coordinate_transform["transforms"] logger.debug(f"transforms is {transforms}") old_coordinate_transforms = [ transforms[str(dim)] for dim in arr.dims if dim not in target_coordinates @@ -181,7 +181,7 @@ def convert_coordinates( output_shape = [len(target_coordinates[str(d)]) for d in coordinate_transform["dims"]] - def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray[np.float64]: + def compute_coordinate(transform: Callable[..., NDArray[np.floating]]) -> NDArray[np.floating]: logger.debug(f"transform function is {transform}") return np.reshape( transform(*meshed_coordinates), @@ -201,7 +201,7 @@ def compute_coordinate(transform: Callable[..., NDArray[np.float64]]) -> NDArray ) # Wrap it all up - def acceptable_coordinate(c: NDArray[np.float64] | xr.DataArray) -> bool: + def acceptable_coordinate(c: NDArray[np.floating] | xr.DataArray) -> bool: """Return True if the dim of array is subset of dim of coordinate_transform. Currently we do this to filter out coordinates diff --git a/src/arpes/utilities/conversion/core.py b/src/arpes/utilities/conversion/core.py index 416eecb9..6d5e85cc 100644 --- a/src/arpes/utilities/conversion/core.py +++ b/src/arpes/utilities/conversion/core.py @@ -59,10 +59,10 @@ def grid_interpolator_from_dataarray( c = arr.coords[d] if len(c) > 1 and c[1] - c[0] < 0: flip_axes.add(str(d)) - values: NDArray[np.float64] = arr.values + values: NDArray[np.floating] = arr.values for dim in flip_axes: values = np.flip(values, arr.dims.index(dim)) - interp_points: list[NDArray[np.float64]] = [ + interp_points: list[NDArray[np.floating]] = [ arr.coords[d].values[::-1] if d in flip_axes else arr.coords[d].values for d in arr.dims ] trace_size = [len(pts) for pts in interp_points] diff --git a/src/arpes/utilities/conversion/fast_interp.py b/src/arpes/utilities/conversion/fast_interp.py index 1fa7b139..e6eaeed8 100644 --- a/src/arpes/utilities/conversion/fast_interp.py +++ b/src/arpes/utilities/conversion/fast_interp.py @@ -95,7 +95,7 @@ def raw_lin_interpolate_3d( # noqa: PLR0913 @numba.njit def lin_interpolate_3d( # noqa: PLR0913 - data: NDArray[np.float64], + data: NDArray[np.floating], ix: int, iy: int, iz: int, @@ -123,7 +123,7 @@ def lin_interpolate_3d( # noqa: PLR0913 @numba.njit def lin_interpolate_2d( # noqa: PLR0913 - data: NDArray[np.float64], + data: NDArray[np.floating], ix: int, iy: int, ixp: int, @@ -143,8 +143,8 @@ def lin_interpolate_2d( # noqa: PLR0913 @numba.njit(parallel=True) def interpolate_3d( # noqa: PLR0913 - data: NDArray[np.float64], - output: NDArray[np.float64], + data: NDArray[np.floating], + output: NDArray[np.floating], lower_corner_x: float, lower_corner_y: float, lower_corner_z: float, @@ -154,9 +154,9 @@ def interpolate_3d( # noqa: PLR0913 shape_x: int, shape_y: int, shape_z: int, - x: NDArray[np.float64], - y: NDArray[np.float64], - z: NDArray[np.float64], + x: NDArray[np.floating], + y: NDArray[np.floating], + z: NDArray[np.floating], fill_value: float = np.nan, ) -> None: for i in numba.prange(len(x)): @@ -195,16 +195,16 @@ def 
_is_out_of_bounds(i: tuple[float, float, float], shape: tuple[int, int, int] @numba.njit(parallel=True) def interpolate_2d( # noqa: PLR0913 - data: NDArray[np.float64], - output: NDArray[np.float64], + data: NDArray[np.floating], + output: NDArray[np.floating], lower_corner_x: float, lower_corner_y: float, delta_x: float, delta_y: float, shape_x: int, shape_y: int, - x: NDArray[np.float64], - y: NDArray[np.float64], + x: NDArray[np.floating], + y: NDArray[np.floating], fill_value: float = np.nan, ) -> None: for i in numba.prange(len(x)): @@ -240,7 +240,7 @@ class Interpolator: lower_corner: list[float] delta: list[float] shape: list[int] - data: NDArray[np.float64] + data: NDArray[np.floating] def __post_init__(self) -> None: """Convert data to floating point representation. @@ -253,8 +253,8 @@ def __post_init__(self) -> None: @classmethod def from_arrays( cls: type[Interpolator], - xyz: list[NDArray[np.float64]], - data: NDArray[np.float64], + xyz: list[NDArray[np.floating]], + data: NDArray[np.floating], ) -> Interpolator: """Initializes the interpreter from a coordinate and data array. @@ -268,7 +268,10 @@ def from_arrays( shape = [len(xi) for xi in xyz] return cls(lower_corner, delta, shape, data) - def __call__(self, xi: NDArray[np.float64] | list[NDArray[np.float64]]) -> NDArray[np.float64]: + def __call__( + self, + xi: NDArray[np.floating] | list[NDArray[np.floating]], + ) -> NDArray[np.floating]: """Performs linear interpolation at the coordinates given by `xi`. Whether 2D or 3D interpolation is used depends on the dimensionality of `xi` and diff --git a/src/arpes/utilities/conversion/kx_ky_conversion.py b/src/arpes/utilities/conversion/kx_ky_conversion.py index 1a2055b7..8e8ce6f5 100644 --- a/src/arpes/utilities/conversion/kx_ky_conversion.py +++ b/src/arpes/utilities/conversion/kx_ky_conversion.py @@ -40,10 +40,10 @@ @numba.njit(parallel=True) def _exact_arcsin( # noqa: PLR0913 - k_par: NDArray[np.float64], - k_perp: NDArray[np.float64], - k_tot: NDArray[np.float64], - phi: NDArray[np.float64], + k_par: NDArray[np.floating], + k_perp: NDArray[np.floating], + k_tot: NDArray[np.floating], + phi: NDArray[np.floating], offset: float, *, par_tot: bool, @@ -60,9 +60,9 @@ def _exact_arcsin( # noqa: PLR0913 @numba.njit(parallel=True) def _small_angle_arcsin( # noqa: PLR0913 - k_par: NDArray[np.float64], - k_tot: NDArray[np.float64], - phi: NDArray[np.float64], + k_par: NDArray[np.floating], + k_tot: NDArray[np.floating], + phi: NDArray[np.floating], offset: float, *, par_tot: bool, @@ -84,10 +84,10 @@ def _small_angle_arcsin( # noqa: PLR0913 @numba.njit(parallel=True) def _rotate_kx_ky( - kx: NDArray[np.float64], - ky: NDArray[np.float64], - kxout: NDArray[np.float64], - kyout: NDArray[np.float64], + kx: NDArray[np.floating], + ky: NDArray[np.floating], + kxout: NDArray[np.floating], + kyout: NDArray[np.floating], chi: float, ) -> None: cos_chi = np.cos(chi) @@ -101,8 +101,8 @@ def _rotate_kx_ky( def _compute_ktot( hv: float, work_function: float, - binding_energy: NDArray[np.float64], - k_tot: NDArray[np.float64], + binding_energy: NDArray[np.floating], + k_tot: NDArray[np.floating], ) -> None: """Calculate 0.512 √E. 
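The 0.512 in this docstring is the free-electron prefactor: k = sqrt(2 m_e E_kin)/hbar evaluates to about 0.5123 inverse angstroms per sqrt(eV). A quick numeric check, recomputing the constant from scipy rather than relying on the library's K_INV_ANGSTROM:

    import numpy as np
    from scipy import constants

    # k [1/Angstrom] = sqrt(2 * m_e * E_kin) / hbar, with E_kin in eV
    k_inv_angstrom = np.sqrt(2 * constants.m_e * constants.e) / constants.hbar * 1e-10
    print(k_inv_angstrom)  # ~0.5123

    def k_tot(kinetic_energy_ev: np.ndarray) -> np.ndarray:
        """Free-electron total momentum in 1/Angstrom for energies in eV."""
        return k_inv_angstrom * np.sqrt(kinetic_energy_ev)

    print(k_tot(np.array([16.8, 100.0])))  # roughly [2.10, 5.12]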
@@ -121,8 +121,8 @@ def _compute_ktot( def _safe_compute_k_tot( hv: float, work_function: float, - binding_energy: float | NDArray[np.float64] | xr.DataArray, -) -> NDArray[np.float64]: + binding_energy: float | NDArray[np.floating] | xr.DataArray, +) -> NDArray[np.floating]: if isinstance(binding_energy, float): arr_binding_energy = np.array([binding_energy]) elif isinstance(binding_energy, xr.DataArray): @@ -154,14 +154,14 @@ def __init__(self, *args: Incomplete, **kwargs: Incomplete) -> None: super().__init__(*args, **kwargs) logger.debug(f"self.dim_order: {self.dim_order}") - self.k_tot: NDArray[np.float64] | None = None - self.phi: NDArray[np.float64] | None = None + self.k_tot: NDArray[np.floating] | None = None + self.phi: NDArray[np.floating] | None = None def get_coordinates( self, resolution: dict[str, float] | None = None, bounds: dict[str, tuple[float, float]] | None = None, - ) -> dict[Hashable, NDArray[np.float64]]: + ) -> dict[Hashable, NDArray[np.floating]]: """Calculates appropriate coordinate bounds. Args: @@ -203,7 +203,7 @@ def get_coordinates( coordinates.update(base_coords) return coordinates - def compute_k_tot(self, binding_energy: NDArray[np.float64]) -> None: + def compute_k_tot(self, binding_energy: NDArray[np.floating]) -> None: """Compute the total momentum (inclusive of kz) at different binding energies.""" energy_notation = self.arr.S.energy_notation hv = self.arr.S.hv @@ -213,10 +213,10 @@ def compute_k_tot(self, binding_energy: NDArray[np.float64]) -> None: def kspace_to_phi( self, - binding_energy: NDArray[np.float64], - kp: NDArray[np.float64], + binding_energy: NDArray[np.floating], + kp: NDArray[np.floating], *args: Incomplete, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Converts from momentum back to the analyzer angular axis.""" # Dont remove *args even if not used. 
del args @@ -251,10 +251,12 @@ def kspace_to_phi( assert self.phi is not None return self.phi - def conversion_for(self, dim: Hashable) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: + def conversion_for( + self, dim: Hashable + ) -> Callable[[NDArray[np.floating]], NDArray[np.floating]]: """Looks up the appropriate momentum-to-angle conversion routine by dimension name.""" - def _with_identity(*args: NDArray[np.float64]) -> NDArray[np.float64]: + def _with_identity(*args: NDArray[np.floating]) -> NDArray[np.floating]: return self.identity_transform(dim, *args) return { # type: ignore[return-value] @@ -276,12 +278,12 @@ class ConvertKxKy(CoordinateConverter): def __init__(self, arr: xr.DataArray, *args: Incomplete, **kwargs: Incomplete) -> None: """Initialize the kx-ky momentum converter and cached coordinate values.""" super().__init__(arr, *args, **kwargs) - self.k_tot: NDArray[np.float64] | None = None + self.k_tot: NDArray[np.floating] | None = None # the angle perpendicular to phi as appropriate to the scan, this can be any of # psi, theta, beta - self.perp_angle: NDArray[np.float64] | None = None - self.rkx: NDArray[np.float64] | None = None - self.rky: NDArray[np.float64] | None = None + self.perp_angle: NDArray[np.floating] | None = None + self.rkx: NDArray[np.floating] | None = None + self.rky: NDArray[np.floating] | None = None # accept either vertical or horizontal, fail otherwise if not any( np.abs(arr.alpha - alpha_option) < np.deg2rad(1) for alpha_option in [0, np.pi / 2] @@ -311,7 +313,7 @@ def get_coordinates( self, resolution: dict[str, float] | None = None, bounds: dict[str, tuple[float, float]] | None = None, - ) -> dict[Hashable, NDArray[np.float64]]: + ) -> dict[Hashable, NDArray[np.floating]]: """Calculates the coordinates which should be used in momentum space. 
Args: @@ -375,7 +377,7 @@ def get_coordinates( coordinates.update(base_coords) return coordinates - def compute_k_tot(self, binding_energy: NDArray[np.float64]) -> None: + def compute_k_tot(self, binding_energy: NDArray[np.floating]) -> None: """Compute the total momentum (inclusive of kz) at different binding energies.""" energy_notation = self.arr.S.energy_notation hv: float = self.arr.S.hv @@ -383,10 +385,10 @@ def compute_k_tot(self, binding_energy: NDArray[np.float64]) -> None: hv = 0.0 if energy_notation is EnergyNotation.FINAL else hv self.k_tot = _safe_compute_k_tot(hv, work_function, binding_energy) - def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.float64]]: + def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.floating]]: """Looks up the appropriate momentum-to-angle conversion routine by dimension name.""" - def _with_identity(*args: NDArray[np.float64]) -> NDArray[np.float64]: + def _with_identity(*args: NDArray[np.floating]) -> NDArray[np.floating]: return self.identity_transform(dim, *args) return { # type: ignore[return-value] @@ -408,9 +410,9 @@ def needs_rotation(self) -> bool: def rkx_rky( self, - kx: NDArray[np.float64], - ky: NDArray[np.float64], - ) -> tuple[NDArray[np.float64], NDArray[np.float64]]: + kx: NDArray[np.floating], + ky: NDArray[np.floating], + ) -> tuple[NDArray[np.floating], NDArray[np.floating]]: """Returns the rotated kx and ky values when we are rotating by nonzero chi.""" if self.rkx is not None and self.rky is not None: return self.rkx, self.rky @@ -422,11 +424,11 @@ def rkx_rky( def kspace_to_phi( self, - binding_energy: NDArray[np.float64], - kx: NDArray[np.float64], - ky: NDArray[np.float64], + binding_energy: NDArray[np.floating], + kx: NDArray[np.floating], + ky: NDArray[np.floating], *args: Incomplete, - ) -> NDArray[np.float64]: + ) -> NDArray[np.floating]: """Converts from momentum back to the analyzer angular axis.""" logger.debug("the following args are not used in kspace_to_phi") logger.debug(args) @@ -467,10 +469,10 @@ def kspace_to_phi( def kspace_to_perp_angle( self, - binding_energy: NDArray[np.float64], - kx: NDArray[np.float64], - ky: NDArray[np.float64], - ) -> NDArray[np.float64]: + binding_energy: NDArray[np.floating], + kx: NDArray[np.floating], + ky: NDArray[np.floating], + ) -> NDArray[np.floating]: """Converts from momentum back to the scan angle perpendicular to the analyzer.""" if self.perp_angle is not None: return self.perp_angle diff --git a/src/arpes/utilities/conversion/kz_conversion.py b/src/arpes/utilities/conversion/kz_conversion.py index 0afeb1ed..58e057a8 100644 --- a/src/arpes/utilities/conversion/kz_conversion.py +++ b/src/arpes/utilities/conversion/kz_conversion.py @@ -25,10 +25,10 @@ @numba.njit(parallel=True) def _kspace_to_hv( - kp: NDArray[np.float64], - kz: NDArray[np.float64], - hv: NDArray[np.float64], - energy_shift: NDArray[np.float64], + kp: NDArray[np.floating], + kz: NDArray[np.floating], + hv: NDArray[np.floating], + energy_shift: NDArray[np.floating], *, is_constant_shift: bool, ) -> None: @@ -41,9 +41,9 @@ def _kspace_to_hv( @numba.njit(parallel=True) def _kp_to_polar( - kinetic_energy: NDArray[np.float64], - kp: NDArray[np.float64], - phi: NDArray[np.float64], + kinetic_energy: NDArray[np.floating], + kp: NDArray[np.floating], + phi: NDArray[np.floating], angle_offset: float, ) -> None: """Efficiently performs the inverse coordinate transform phi(hv, kp).""" @@ -76,13 +76,13 @@ class ConvertKpKz(CoordinateConverter): def __init__(self, *args: Incomplete, 
**kwargs: Incomplete) -> None: """Cache the photon energy coordinate we calculate backwards from kz.""" super().__init__(*args, **kwargs) - self.hv: NDArray[np.float64] | None = None + self.hv: NDArray[np.floating] | None = None def get_coordinates( self, resolution: dict[str, float] | None = None, bounds: dict[str, tuple[float, float]] | None = None, - ) -> dict[Hashable, NDArray[np.float64]]: + ) -> dict[Hashable, NDArray[np.floating]]: """Calculates the coordinates which should be used in momentum space. Args: @@ -131,10 +131,10 @@ def get_coordinates( def kspace_to_hv( self, - binding_energy: NDArray[np.float64], - kp: NDArray[np.float64], - kz: NDArray[np.float64], - ) -> NDArray[np.float64]: + binding_energy: NDArray[np.floating], + kp: NDArray[np.floating], + kz: NDArray[np.floating], + ) -> NDArray[np.floating]: """Converts from momentum back to the raw photon energy.""" if self.hv is None: inner_v = self.arr.S.inner_potential @@ -158,16 +158,16 @@ def kspace_to_hv( def kspace_to_phi( self, - binding_energy: NDArray[np.float64], - kp: NDArray[np.float64], - kz: NDArray[np.float64], - ) -> NDArray[np.float64]: + binding_energy: NDArray[np.floating], + kp: NDArray[np.floating], + kz: NDArray[np.floating], + ) -> NDArray[np.floating]: """Converts from momentum back to the hemisphere angle axis. Args: - binding_energy(NDArray[np.float64]): [TODO:description] - kp (NDArray[np.float64]): [TODO:description] - kz (NDArray[np.float64]): [TODO:description] + binding_energy(NDArray[np.floating]): [TODO:description] + kp (NDArray[np.floating]): [TODO:description] + kz (NDArray[np.floating]): [TODO:description] Returns: [TODO:description] @@ -193,10 +193,10 @@ def kspace_to_phi( self.phi = self.calibration.correct_detector_angle(eV=binding_energy, phi=self.phi) return self.phi - def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.float64]]: + def conversion_for(self, dim: Hashable) -> Callable[..., NDArray[np.floating]]: """Looks up the appropriate momentum-to-angle conversion routine by dimension name.""" - def _with_identity(*args: NDArray[np.float64]) -> NDArray[np.float64]: + def _with_identity(*args: NDArray[np.floating]) -> NDArray[np.floating]: return self.identity_transform(dim, *args) return { # type: ignore[return-value] diff --git a/src/arpes/utilities/funcutils.py b/src/arpes/utilities/funcutils.py index 7452ceb9..6de33e1b 100644 --- a/src/arpes/utilities/funcutils.py +++ b/src/arpes/utilities/funcutils.py @@ -48,7 +48,7 @@ def collect_leaves(tree: dict[str, Any], is_leaf: Incomplete = None) -> dict: A dictionary with the leaves and their direct parent key. """ - def reducer(dd: dict, item: tuple[str, NDArray[np.float64]]) -> dict: + def reducer(dd: dict, item: tuple[str, NDArray[np.floating]]) -> dict: dd[item[0]].append(item[1]) return dd diff --git a/src/arpes/utilities/selections.py b/src/arpes/utilities/selections.py index 851337d8..5cb11f7c 100644 --- a/src/arpes/utilities/selections.py +++ b/src/arpes/utilities/selections.py @@ -315,7 +315,7 @@ def select_disk_mask( around: dict[str, float] | None = None, *, flat: bool = False, -) -> NDArray[np.float64]: +) -> NDArray[np.floating]: """A complement to `select_disk` which only generates the mask for the selection. Selects the data in a disk around the point described by `around`. 
A point is a labelled @@ -363,7 +363,7 @@ def select_disk( around: dict[str, float] | None = None, *, invert: bool = False, -) -> tuple[dict[str, NDArray[np.float64]], NDArray[np.float64], NDArray[np.float64]]: +) -> tuple[dict[str, NDArray[np.floating]], NDArray[np.floating], NDArray[np.floating]]: """Selects the data in a disk around the point requested. (or annulus if `outer_radius` is provided) diff --git a/src/arpes/xarray_extensions/_helper/general.py b/src/arpes/xarray_extensions/_helper/general.py index f608abd2..47dafae9 100644 --- a/src/arpes/xarray_extensions/_helper/general.py +++ b/src/arpes/xarray_extensions/_helper/general.py @@ -24,7 +24,7 @@ def round_coordinates_impl( data: DataType, - coords_to_round: dict[str, list[float] | NDArray[np.float64]], + coords_to_round: dict[str, list[float] | NDArray[np.floating]], *, as_indices: bool = False, ) -> dict[str, float | int]: @@ -83,7 +83,7 @@ def filter_coord_impl( @overload def apply_over_impl( data: xr.Dataset, - fn: Callable[[xr.Dataset], xr.Dataset | NDArray[np.float64]], + fn: Callable[[xr.Dataset], xr.Dataset | NDArray[np.floating]], *, copy: bool = True, selections: Mapping[str, SelType] | None = None, @@ -94,7 +94,7 @@ def apply_over_impl( @overload def apply_over_impl( data: xr.DataArray, - fn: Callable[[xr.DataArray], xr.DataArray | NDArray[np.float64]], + fn: Callable[[xr.DataArray], xr.DataArray | NDArray[np.floating]], *, copy: bool = True, selections: Mapping[str, SelType] | None = None, diff --git a/src/arpes/xarray_extensions/accessor/general.py b/src/arpes/xarray_extensions/accessor/general.py index 0c8bec3b..3ecc3b11 100644 --- a/src/arpes/xarray_extensions/accessor/general.py +++ b/src/arpes/xarray_extensions/accessor/general.py @@ -131,7 +131,7 @@ def filter_vars( def shift_meshgrid( self, dims: tuple[str, ...], - shift: NDArray[np.float64] | float, + shift: NDArray[np.floating] | float, ) -> xr.Dataset: """Shifts the meshgrid coordinates for specified dimensions. @@ -145,10 +145,10 @@ def shift_meshgrid( dims (tuple[str, ...]): A tuple of strings specifying the names of the dimensions whose coordinates will be shifted. These dimensions should typically form a meshgrid. - shift (NDArray[np.float64] | float): The amount(s) by which to shift + shift (NDArray[np.floating] | float): The amount(s) by which to shift the coordinates. - If a `float`, the same scalar shift is applied uniformly to all dimensions. - - If an `NDArray[np.float64]`, it must be a 1D array with a + - If an `NDArray[np.floating]`, it must be a 1D array with a length equal to `len(dims)`. Each element in the array corresponds to the shift applied to the coordinate of the respective dimension in `dims`. @@ -193,9 +193,9 @@ def shift_meshgrid( """ shift_array = np.ones((len(dims),)) * shift if isinstance(shift, float) else shift - def transform(data: NDArray[np.float64]) -> NDArray[np.float64]: + def transform(data: NDArray[np.floating]) -> NDArray[np.floating]: assert isinstance(shift_array, np.ndarray) - new_shift: NDArray[np.float64] = shift_array + new_shift: NDArray[np.floating] = shift_array for _ in range(len(dims)): new_shift = np.expand_dims(new_shift, axis=0) @@ -206,7 +206,7 @@ def transform(data: NDArray[np.float64]) -> NDArray[np.float64]: def scale_meshgrid( self, dims: tuple[str, ...], - scale: float | NDArray[np.float64], + scale: float | NDArray[np.floating], ) -> xr.Dataset: """Scales the meshgrid coordinates for specified dimensions. 
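The expand_dims loop in shift_meshgrid above is manual broadcasting: a length-len(dims) shift vector gains leading singleton axes until it lines up against the trailing component axis of the stacked meshgrid. The same mechanics in a standalone numpy snippet, outside the accessor:

    import numpy as np

    x, y = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 2, 4), indexing="ij")
    grid = np.stack([x, y], axis=-1)   # shape (3, 4, 2): one column per dimension

    shift = np.array([0.5, -1.0])      # one shift per dimension
    new_shift = shift
    for _ in range(grid.ndim - 1):     # prepend axes so the vector broadcasts
        new_shift = np.expand_dims(new_shift, axis=0)

    shifted = grid + new_shift         # x shifted by +0.5, y by -1.0
    print(shifted[0, 0])               # [ 0.5 -1. ]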
@@ -217,11 +217,11 @@ def scale_meshgrid( Args: dims (tuple[str, ...]): A tuple of strings specifying the names of the dimensions whose coordinates will be scaled. - scale (float | NDArray[np.float64]): The amount(s) by which to scale the coordinates. + scale (float | NDArray[np.floating]): The amount(s) by which to scale the coordinates. - If a `float`, the same scalar scaling factor is applied uniformly to all specified dimensions. - - If an `NDArray[np.float64]`, it can be a 1D array or a 2D matrix. + - If an `NDArray[np.floating]`, it can be a 1D array or a 2D matrix. If 1D, its length must equal `len(dims)`. Each element represents the scaling factor for the corresponding dimension. This is converted internally into a diagonal scaling matrix. If 2D, it must be a square matrix of shape @@ -282,7 +282,7 @@ def scale_meshgrid( def transform_meshgrid( self, dims: Collection[str], - transform: NDArray[np.float64] | Callable, + transform: NDArray[np.floating] | Callable, ) -> xr.Dataset: r"""Transforms the given meshgrid coordinates by an arbitrary function or matrix. @@ -306,15 +306,15 @@ def transform_meshgrid( dimensions whose coordinates should be transformed. These dimensions are assumed to form a meshgrid. The order of dimensions in this collection matters, as it defines the order of columns in the stacked coordinate array passed to `transform`. - transform (NDArray[np.float64] | Callable[[NDArray[np.float64]], NDArray[np.float64]]): + transform (NDArray[np.floating] | Callable[[NDArray[np.floating]], NDArray[np.floating]]): The transformation to apply to the stacked meshgrid coordinates. This can be one of two types: - - `NDArray[np.float64]`: A 2D NumPy array representing a **linear transformation + - `NDArray[np.floating]`: A 2D NumPy array representing a **linear transformation matrix**. This matrix will be right-multiplied onto the stacked coordinate array. Its shape must be `(len(dims), len(dims))`. This is suitable for operations like rotation, scaling (including non-uniform), and shearing. - - `Callable[[NDArray[np.float64]], NDArray[np.float64]]`: A + - `Callable[[NDArray[np.floating]], NDArray[np.floating]]`: A function that takes a single NumPy array as input and returns a NumPy array. The input array will have the shape `(..., len(dims))`, where `...` represents the original @@ -362,7 +362,7 @@ def transform_meshgrid( [0. 1.]] >>> # Example 2: Non-linear transformation (squaring each coordinate) - >>> def square_coords(coords_array: NDArray[np.float64]) -> NDArray[np.float64]: + >>> def square_coords(coords_array: NDArray[np.floating]) -> NDArray[np.floating]: ... return coords_array**2 >>> squared_ds = ds.G.transform_meshgrid(dims=("x_coord", "y_coord"), transform=square_coords) @@ -383,7 +383,7 @@ def transform_meshgrid( `~.GenericDatasetAccessor.scale_meshgrid`: A specialized linear transformation for multiplicative scaling. `numpy.meshgrid`: For understanding how meshgrid coordinates are typically structured. - """ + """ # noqa: E501 assert isinstance(self._obj, xr.Dataset) as_ndarray = np.stack([self._obj.data_vars[d].values for d in dims], axis=-1) @@ -401,7 +401,7 @@ def transform_meshgrid( def round_coordinates( self, - coords_to_round: dict[str, list[float] | NDArray[np.float64]], + coords_to_round: dict[str, list[float] | NDArray[np.floating]], *, as_indices: bool = False, ) -> dict[str, float | int]: @@ -413,7 +413,7 @@ def round_coordinates( rounded coordinates as their integer indices. 
Args:
-            coords_to_round (dict[str, list[float] | NDArray[np.float64]]):
+            coords_to_round (dict[str, list[float] | NDArray[np.floating]]):
                 A dictionary where keys are dimension names (strings) and values
                 are the target coordinate points (floats or arrays of floats)
                 to be rounded to the nearest existing coordinate in the dataset.
@@ -475,7 +475,7 @@ def filter_coord(

     def apply_over(
         self,
-        fn: Callable[[xr.Dataset], xr.Dataset | NDArray[np.float64]],
+        fn: Callable[[xr.Dataset], xr.Dataset | NDArray[np.floating]],
         *,
         copy: bool = True,
         selections: Mapping[str, SelType] | None = None,
@@ -539,7 +539,7 @@ def argmax_coords(self) -> dict[Hashable, float]:
         flat_indices = np.unravel_index(idx, data.values.shape)
         return {d: data.coords[d][flat_indices[i]].item() for i, d in enumerate(data.dims)}

-    def ravel(self) -> Mapping[Hashable, xr.DataArray | NDArray[np.float64]]:
+    def ravel(self) -> Mapping[Hashable, xr.DataArray | NDArray[np.floating]]:
         """Converts to a flat representation where the coordinate values are also present.

         Extremely valuable for plotting a dataset with coordinates, X, Y and values Z(X,Y)
@@ -571,7 +571,7 @@ def meshgrid(
         self,
         *,
         as_dataset: bool = False,
-    ) -> dict[Hashable, NDArray[np.float64]] | xr.Dataset:
+    ) -> dict[Hashable, NDArray[np.floating]] | xr.Dataset:
         r"""Creates a meshgrid from the DataArray's dimensions and includes its values.

         optionally returning it as an xarray.Dataset.
@@ -587,7 +587,7 @@ def meshgrid(
                 If `False` (default), the result is a dictionary of NumPy arrays.

         Returns:
-            dict[Hashable, NDArray[np.float64]] | xr.Dataset:
+            dict[Hashable, NDArray[np.floating]] | xr.Dataset:
                 - If `as_dataset` is `False`: A dictionary where keys are dimension names
                   and `"data"`, and values are multi-dimensional NumPy arrays
                   representing the meshgrid coordinates and the original data values.
@@ -899,7 +899,7 @@ def transform(

     def map(
         self,
-        fn: Callable[[NDArray[np.float64], Any], NDArray[np.float64]],
+        fn: Callable[[NDArray[np.floating], Any], NDArray[np.floating]],
         **kwargs: Incomplete,
     ) -> xr.DataArray:
         """Applies the specified function to the values of an xarray and returns a new DataArray.
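In plain xarray terms, map amounts to "apply fn to .values and keep dims, coords, and attrs". A minimal sketch of the equivalent operation without the accessor machinery, which is roughly what data.G.map(normalize) produces:

    import numpy as np
    import xarray as xr

    arr = xr.DataArray(
        np.random.default_rng(0).normal(size=10),
        coords={"eV": np.linspace(-0.5, 0.5, 10)},
        dims=("eV",),
        attrs={"id": "example"},
    )

    def normalize(values: np.ndarray) -> np.ndarray:
        return values / np.nanmax(np.abs(values))

    # Same coords and attrs, new values.
    mapped = arr.copy(data=normalize(arr.values))
    print(mapped.attrs, float(np.abs(mapped).max()))  # {'id': 'example'} 1.0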
@@ -915,7 +915,7 @@ def map( def shift_by( self, - other: xr.DataArray | NDArray[np.float64], + other: xr.DataArray | NDArray[np.floating], shift_axis: str = "", by_axis: str = "", *, @@ -973,7 +973,7 @@ def shift_coords_by( def with_values( self, - new_values: NDArray[np.float64], + new_values: NDArray[np.floating], *, keep_attrs: bool = True, ) -> xr.DataArray: @@ -1009,7 +1009,7 @@ def with_values( def round_coordinates( self, - coords_to_round: dict[str, list[float] | NDArray[np.float64]], + coords_to_round: dict[str, list[float] | NDArray[np.floating]], *, as_indices: bool = False, ) -> dict[str, float | int]: @@ -1026,7 +1026,7 @@ def filter_coord( def apply_over( self, - fn: Callable[[xr.DataArray], xr.DataArray | NDArray[np.float64]], + fn: Callable[[xr.DataArray], xr.DataArray | NDArray[np.floating]], *, copy: bool = True, selections: Mapping[str, SelType] | None = None, diff --git a/src/arpes/xarray_extensions/accessor/property.py b/src/arpes/xarray_extensions/accessor/property.py index ab625ddc..4df95bb5 100644 --- a/src/arpes/xarray_extensions/accessor/property.py +++ b/src/arpes/xarray_extensions/accessor/property.py @@ -553,7 +553,7 @@ def beamline_info(self) -> LightSourceInfo: return beamline_info @property - def sweep_settings(self) -> dict[str, xr.DataArray | NDArray[np.float64] | float | None]: + def sweep_settings(self) -> dict[str, xr.DataArray | NDArray[np.floating] | float | None]: """For datasets acquired with swept acquisition settings, provides those settings.""" return { "high_energy": self._obj.attrs.get("sweep_high_energy"), diff --git a/tests/test_analysis_background.py b/tests/test_analysis_background.py index e17debb1..5f576c1b 100644 --- a/tests/test_analysis_background.py +++ b/tests/test_analysis_background.py @@ -10,13 +10,13 @@ @pytest.fixture -def lorentzian_curve() -> NDArray[np.float64]: +def lorentzian_curve() -> NDArray[np.floating]: x = np.linspace(-2, 0, 100) return lorentzian(x, 0.05, -0.5, 0.1) @pytest.fixture -def lorentzian_linear_bg(lorentzian_curve: NDArray[np.float64]): +def lorentzian_linear_bg(lorentzian_curve: NDArray[np.floating]): x = np.linspace(-2, 0, 100) y = x + lorentzian_curve return xr.DataArray(y, coords={"eV": x}, dims="eV") From 349ee32f538c3bb264b2ff20ce658fd524b0caf8 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Tue, 27 Jan 2026 18:41:53 +0900 Subject: [PATCH 15/16] =?UTF-8?q?=F0=9F=8E=A8=20=20Apply=20ruff?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/utilities/conversion/kx_ky_conversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arpes/utilities/conversion/kx_ky_conversion.py b/src/arpes/utilities/conversion/kx_ky_conversion.py index 8e8ce6f5..7514bbb9 100644 --- a/src/arpes/utilities/conversion/kx_ky_conversion.py +++ b/src/arpes/utilities/conversion/kx_ky_conversion.py @@ -252,7 +252,7 @@ def kspace_to_phi( return self.phi def conversion_for( - self, dim: Hashable + self, dim: Hashable, ) -> Callable[[NDArray[np.floating]], NDArray[np.floating]]: """Looks up the appropriate momentum-to-angle conversion routine by dimension name.""" From 0cdc8cfbb83f2a95775adf4a6ec23e177d30e849 Mon Sep 17 00:00:00 2001 From: Ryuichi Arafune Date: Tue, 27 Jan 2026 18:49:50 +0900 Subject: [PATCH 16/16] =?UTF-8?q?=F0=9F=8E=A8=20=20Add=20noqa?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/arpes/configuration/workspace.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git 
a/src/arpes/configuration/workspace.py b/src/arpes/configuration/workspace.py index cf88bd15..b14a1dcd 100644 --- a/src/arpes/configuration/workspace.py +++ b/src/arpes/configuration/workspace.py @@ -8,12 +8,17 @@ prefer using `config_manager.enter_workspace(...)` and `exit_workspace()`. """ +from __future__ import annotations + import warnings from contextlib import ContextDecorator -from types import TracebackType +from typing import TYPE_CHECKING from arpes.configuration.manager import config_manager +if TYPE_CHECKING: + from types import TracebackType + __all__ = ["WorkspaceManager"] @@ -50,7 +55,7 @@ def __init__(self, workspace_name: str = "") -> None: self.workspace_name = workspace_name self._active = bool(workspace_name) - def __enter__(self) -> "WorkspaceManager": + def __enter__(self) -> WorkspaceManager: # noqa: PYI034 """Enter the runtime context for the WorkspaceManager. Returns: