diff --git a/CHANGES.rst b/CHANGES.rst index c80832fcf..c15c8f8a9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -20,6 +20,7 @@ New features and enhancements Breaking changes ^^^^^^^^^^^^^^^^ +* With pandas 2.2 and xarray 2023.11.0, many frequency strings were changed: `Y` becomes `YE`, `M` becomes `ME` and `Q` becomes `QE`; `A` and `AS` are removed (use `YE` and `YS` instead); `H`, `T`, `S`, `L`, `U` and `N` become `h`, `min`, `s`, `ms`, `us` and `ns`. * `bump2version` has been replaced with `bump-my-version` to bump the version number using configurations set in the `pyproject.toml` file. (:issue:`1557`, :pull:`1569`). * `xclim`'s units registry and units formatting are now extended from `cf-xarray`. The exponent sign "^" is now never added in the ``units`` attribute. For example, square meters are given as "m2" instead of "m^2" by xclim, both are still accepted as input. (:issue:`1010`, :pull:`1590`). * `yamale` is now listed as a core dependency (was previously listed in the `dev` installation recipe). (:issue:`1595`, :pull:`1596`). diff --git a/environment.yml b/environment.yml index 04ae0e482..85ddbd58d 100644 --- a/environment.yml +++ b/environment.yml @@ -16,14 +16,14 @@ dependencies: - lmoments3 - numba - numpy >=1.16 - - pandas >=0.23,<2.2 + - pandas >=2.2 - pint >=0.9 - poppler >=0.67 - pyyaml - scikit-learn >=0.21.3 - scipy >=1.2 - statsmodels - - xarray >=2022.06.0,<2023.11.0 + - xarray >=2023.11.0 - yamale # Extras - eofs diff --git a/pyproject.toml b/pyproject.toml index 7b822c8ec..94526cdc6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,14 +44,13 @@ dependencies = [ "lmoments3>=1.0.5", "numba", "numpy>=1.16", - "pandas>=0.23,<2.0; python_version == '3.8'", - "pandas>=0.23,<2.2; python_version >= '3.9'", + "pandas>=2.2; python_version >= '3.9'", "pint>=0.10", "pyyaml", "scikit-learn>=0.21.3", "scipy>=1.2", "statsmodels", - "xarray>=2022.06.0,<2023.11.0", + "xarray>=2023.11.0", "yamale" ] diff --git a/tests/test_atmos.py b/tests/test_atmos.py index 01ebcb3ea..214e0dbfc 100644 --- a/tests/test_atmos.py +++ b/tests/test_atmos.py @@ -272,7 +272,7 @@ def test_wind_power_potential_from_3h_series(): from xclim.testing.helpers import test_timeseries w = test_timeseries( - np.ones(96) * 15, variable="sfcWind", start="7/1/2000", units="m s-1", freq="3H" + np.ones(96) * 15, variable="sfcWind", start="7/1/2000", units="m s-1", freq="3h" ) out = atmos.wind_power_potential(wind_speed=w) diff --git a/tests/test_bootstrapping.py b/tests/test_bootstrapping.py index 9cbc92c30..be52be2f0 100644 --- a/tests/test_bootstrapping.py +++ b/tests/test_bootstrapping.py @@ -26,9 +26,9 @@ class Test_bootstrap: "var,p,index,freq, cftime", ( ["tas", 98, tg90p, "MS", False], - ["tasmin", 98, tn90p, "A-JUL", False], - ["tasmax", 98, tx90p, "Q-APR", False], - ["tasmax", 98, tx90p, "Q-APR", True], + ["tasmin", 98, tn90p, "YE-JUL", False], + ["tasmax", 98, tx90p, "QE-APR", False], + ["tasmax", 98, tx90p, "QE-APR", True], ["tasmin", 2, tn10p, "MS", False], ["tasmax", 2, tx10p, "YS", False], ["tasmax", 2, tx10p, "YS", True], diff --git a/tests/test_calendar.py b/tests/test_calendar.py index aca933cbf..1f6dbee5b 100644 --- a/tests/test_calendar.py +++ b/tests/test_calendar.py @@ -58,7 +58,7 @@ def da(index): ) -@pytest.mark.parametrize("freq", ["6480H", "302431T", "23144781S"]) +@pytest.mark.parametrize("freq", ["6480h", "302431min", "23144781s"]) def test_time_bnds(freq, datetime_index, cftime_index): da_datetime = da(datetime_index).resample(time=freq) da_cftime = da(cftime_index).resample(time=freq) @@ -91,11 +91,11 @@ def test_time_bnds_irregular(typ): start = xr.cftime_range("1990-01-01",
periods=24, freq="MS") # Well. xarray string parsers do not support sub-second resolution, but cftime does. end = xr.cftime_range( - "1990-01-01T23:59:59", periods=24, freq="M" + "1990-01-01T23:59:59", periods=24, freq="ME" ) + pd.Timedelta(0.999999, "s") elif typ == "pd": start = pd.date_range("1990-01-01", periods=24, freq="MS") - end = pd.date_range("1990-01-01 23:59:59.999999999", periods=24, freq="M") + end = pd.date_range("1990-01-01 23:59:59.999999999", periods=24, freq="ME") time = start + (end - start) / 2 @@ -147,7 +147,7 @@ def test_percentile_doy_invalid(): tas = xr.DataArray( [0, 1], dims=("time",), - coords={"time": pd.date_range("2000-01-01", periods=2, freq="H")}, + coords={"time": pd.date_range("2000-01-01", periods=2, freq="h")}, ) with pytest.raises(ValueError): percentile_doy(tas) @@ -156,10 +156,10 @@ def test_percentile_doy_invalid(): @pytest.mark.parametrize( "freqA,op,freqB,exp", [ - ("D", ">", "H", True), + ("D", ">", "h", True), ("2YS", "<=", "QS-DEC", False), ("4W", "==", "3W", False), - ("24H", "==", "D", True), + ("24h", "==", "D", True), ], ) def test_compare_offsets(freqA, op, freqB, exp): @@ -276,8 +276,8 @@ def test_get_calendar_errors(obj): ("standard", "noleap", True, "D"), ("noleap", "default", True, "D"), ("noleap", "all_leap", False, "D"), - ("proleptic_gregorian", "noleap", False, "4H"), - ("default", "noleap", True, "4H"), + ("proleptic_gregorian", "noleap", False, "4h"), + ("default", "noleap", True, "4h"), ], ) def test_convert_calendar(source, target, target_as_str, freq): @@ -312,7 +312,7 @@ def test_convert_calendar(source, target, target_as_str, freq): [ ("standard", "360_day", "D"), ("360_day", "default", "D"), - ("proleptic_gregorian", "360_day", "4H"), + ("proleptic_gregorian", "360_day", "4h"), ], ) @pytest.mark.parametrize("align_on", ["date", "year"]) @@ -332,17 +332,17 @@ def test_convert_calendar_360_days(source, target, freq, align_on): if align_on == "date": np.testing.assert_array_equal( - conv.time.resample(time="M").last().dt.day, + conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30], ) elif target == "360_day": np.testing.assert_array_equal( - conv.time.resample(time="M").last().dt.day, + conv.time.resample(time="ME").last().dt.day, [30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 29], ) else: np.testing.assert_array_equal( - conv.time.resample(time="M").last().dt.day, + conv.time.resample(time="ME").last().dt.day, [30, 29, 30, 30, 31, 30, 30, 31, 30, 31, 29, 31], ) if source == "360_day" and align_on == "year": @@ -357,7 +357,7 @@ def test_convert_calendar_360_days_random(): dims=("time",), coords={ "time": date_range( - "2004-01-01", "2004-12-31T23:59:59", freq="12H", calendar="default" + "2004-01-01", "2004-12-31T23:59:59", freq="12h", calendar="default" ) }, ) @@ -366,7 +366,7 @@ def test_convert_calendar_360_days_random(): dims=("time",), coords={ "time": date_range( - "2004-01-01", "2004-12-30T23:59:59", freq="12H", calendar="360_day" + "2004-01-01", "2004-12-30T23:59:59", freq="12h", calendar="360_day" ) }, ) @@ -395,8 +395,8 @@ def test_convert_calendar_360_days_random(): "source,target,freq", [ ("standard", "noleap", "D"), - ("noleap", "default", "4H"), - ("noleap", "all_leap", "M"), + ("noleap", "default", "4h"), + ("noleap", "all_leap", "ME"), ("360_day", "noleap", "D"), ("noleap", "360_day", "D"), ], @@ -556,7 +556,7 @@ def test_clim_mean_doy(tas_series): def test_doy_to_days_since(): # simple test - time = date_range("2020-07-01", "2022-07-01", freq="AS-JUL") + time = 
date_range("2020-07-01", "2022-07-01", freq="YS-JUL") da = xr.DataArray( [190, 360, 3], dims=("time",), @@ -587,7 +587,7 @@ def test_doy_to_days_since(): xr.testing.assert_identical(da, da2) # with start - time = date_range("2020-12-31", "2022-12-31", freq="Y") + time = date_range("2020-12-31", "2022-12-31", freq="YE") da = xr.DataArray( [190, 360, 3], dims=("time",), @@ -624,10 +624,10 @@ def test_doy_to_days_since(): @pytest.mark.parametrize( "freq,em,eb,es,ea", [ - ("4AS-JUL", 4, "A", True, "JUL"), - ("M", 1, "M", False, None), - ("YS", 1, "A", True, "JAN"), - ("3A", 3, "A", False, "DEC"), + ("4YS-JUL", 4, "Y", True, "JUL"), + ("ME", 1, "M", False, None), + ("YS", 1, "Y", True, "JAN"), + ("3YE", 3, "Y", False, "DEC"), ("D", 1, "D", True, None), ("3W", 21, "D", True, None), ], @@ -649,8 +649,8 @@ def test_parse_offset_invalid(): @pytest.mark.parametrize( "m,b,s,a,exp", [ - (1, "A", True, None, "AS-JAN"), - (2, "Q", False, "DEC", "2Q-DEC"), + (1, "Y", True, None, "YS-JAN"), + (2, "Q", False, "DEC", "2QE-DEC"), (1, "D", False, None, "D"), ], ) @@ -694,7 +694,7 @@ def test_convert_doy(): dims=("time",), coords={ "time": xr.date_range( - "2000-01-01", periods=5, freq="AS-JUL", calendar="standard" + "2000-01-01", periods=5, freq="YS-JUL", calendar="standard" ) }, attrs={"is_dayofyear": 1, "calendar": "standard"}, diff --git a/tests/test_checks.py b/tests/test_checks.py index 7f54dd66f..fedf8f346 100644 --- a/tests/test_checks.py +++ b/tests/test_checks.py @@ -108,7 +108,7 @@ def test_assert_daily(self, date_range): def test_bad_frequency(self, date_range): with pytest.raises(ValidationError): n = 365 - times = date_range("2000-01-01", freq="12H", periods=n) + times = date_range("2000-01-01", freq="12h", periods=n) da = xr.DataArray(np.arange(n), [("time", times)], attrs=self.tas_attrs) tg_mean(da) @@ -116,7 +116,7 @@ def test_bad_frequency(self, date_range): def test_decreasing_index(self, date_range): with pytest.raises(ValidationError): n = 365 - times = date_range("2000-01-01", freq="12H", periods=n) + times = date_range("2000-01-01", freq="12h", periods=n) da = xr.DataArray( np.arange(n), [("time", times[::-1])], attrs=self.tas_attrs ) @@ -149,25 +149,25 @@ def test_check_hourly(self, date_range, random): } n = 100 - time = date_range("2000-01-01", freq="H", periods=n) + time = date_range("2000-01-01", freq="h", periods=n) da = xr.DataArray(random.random(n), [("time", time)], attrs=tas_attrs) - datachecks.check_freq(da, "H") + datachecks.check_freq(da, "h") - time = date_range("2000-01-01", freq="3H", periods=n) + time = date_range("2000-01-01", freq="3h", periods=n) da = xr.DataArray(random.random(n), [("time", time)], attrs=tas_attrs) with pytest.raises(ValidationError): - datachecks.check_freq(da, "H") + datachecks.check_freq(da, "h") with pytest.raises(ValidationError): - datachecks.check_freq(da, ["H", "D"]) + datachecks.check_freq(da, ["h", "D"]) - datachecks.check_freq(da, "H", strict=False) - datachecks.check_freq(da, ["H", "D"], strict=False) - datachecks.check_freq(da, "3H") - datachecks.check_freq(da, ["H", "3H"]) + datachecks.check_freq(da, "h", strict=False) + datachecks.check_freq(da, ["h", "D"], strict=False) + datachecks.check_freq(da, "3h") + datachecks.check_freq(da, ["h", "3h"]) with pytest.raises(ValidationError, match="Unable to infer the frequency of"): - datachecks.check_freq(da.where(da.time.dt.dayofyear != 5, drop=True), "3H") + datachecks.check_freq(da.where(da.time.dt.dayofyear != 5, drop=True), "3h") def test_common_time(self, tas_series, date_range, random): 
tas_attrs = { @@ -176,7 +176,7 @@ def test_common_time(self, tas_series, date_range, random): } n = 100 - time = date_range("2000-01-01", freq="H", periods=n) + time = date_range("2000-01-01", freq="h", periods=n) da = xr.DataArray(random.random(n), [("time", time)], attrs=tas_attrs) # No freq @@ -187,7 +187,7 @@ def test_common_time(self, tas_series, date_range, random): datachecks.check_common_time([db, da]) # Not same freq - time = date_range("2000-01-01", freq="6H", periods=n) + time = date_range("2000-01-01", freq="6h", periods=n) db = xr.DataArray(random.random(n), [("time", time)], attrs=tas_attrs) with pytest.raises(ValidationError, match="Inputs have different frequencies"): datachecks.check_common_time([db, da]) @@ -197,6 +197,6 @@ def test_common_time(self, tas_series, date_range, random): db["time"] = db.time + pd.Timedelta(30, "min") with pytest.raises( ValidationError, - match=r"All inputs have the same frequency \(H\), but they are not anchored on the same minutes", + match=r"All inputs have the same frequency \(h\), but they are not anchored on the same minutes", ): datachecks.check_common_time([db, da]) diff --git a/tests/test_ensembles.py b/tests/test_ensembles.py index d093180f1..aa39b42d6 100644 --- a/tests/test_ensembles.py +++ b/tests/test_ensembles.py @@ -129,7 +129,7 @@ def test_create_unequal_times(self, ensemble_dataset_objects, open_dataset): [(xr.cftime_range, {"calendar": "360_day"}), (pd.date_range, {})], ) def test_create_unaligned_times(self, timegen, calkw): - t1 = timegen("2000-01-01", periods=24, freq="M", **calkw) + t1 = timegen("2000-01-01", periods=24, freq="ME", **calkw) t2 = timegen("2000-01-01", periods=24, freq="MS", **calkw) d1 = xr.DataArray( diff --git a/tests/test_ffdi.py b/tests/test_ffdi.py index 3fcf70664..1449f0453 100644 --- a/tests/test_ffdi.py +++ b/tests/test_ffdi.py @@ -149,7 +149,7 @@ def test_ffdi_indicators(self, open_dataset, init_kbdi, limiting_func): # outputs look sensible test_data = open_dataset(data_url) - pr_annual = test_data["pr"].resample(time="A").mean().mean("time") + pr_annual = test_data["pr"].resample(time="YS").mean().mean("time") pr_annual.attrs["units"] = test_data["pr"].attrs["units"] if init_kbdi: diff --git a/tests/test_generic.py b/tests/test_generic.py index 21a4f9394..9a7e6e7a0 100644 --- a/tests/test_generic.py +++ b/tests/test_generic.py @@ -26,7 +26,7 @@ def test_season_default(self, q_series): def test_season(self, q_series): q = q_series(np.arange(1000)) - o = generic.select_resample_op(q, "count", freq="AS-DEC", season="DJF") + o = generic.select_resample_op(q, "count", freq="YS-DEC", season="DJF") assert o[0] == 31 + 29 @@ -97,7 +97,7 @@ def test_calendars(self): ) out = generic.aggregate_between_dates( - data_std, start_std, end_std, op="sum", freq="AS-JUL" + data_std, start_std, end_std, op="sum", freq="YS-JUL" ) # expected output @@ -110,7 +110,7 @@ def test_calendars(self): # check calendar conversion out_noleap = generic.aggregate_between_dates( - data_std, start_std, end_noleap, op="sum", freq="AS-JUL" + data_std, start_std, end_noleap, op="sum", freq="YS-JUL" ) np.testing.assert_allclose(out, out_noleap) diff --git a/tests/test_generic_indicators.py b/tests/test_generic_indicators.py index b197eb88e..fa9737ff8 100644 --- a/tests/test_generic_indicators.py +++ b/tests/test_generic_indicators.py @@ -104,7 +104,7 @@ def test_missing(self, ndq_series): np.testing.assert_array_equal(out.sel(time="1902").isnull(), True) def test_3hourly(self, pr_hr_series, random): - pr = 
pr_hr_series(random.random(366 * 24)).resample(time="3H").mean() + pr = pr_hr_series(random.random(366 * 24)).resample(time="3h").mean() out = generic.stats(pr, freq="MS", op="var") assert out.units == "kg2 m-4 s-2" assert out.long_name == "Variance of variable" diff --git a/tests/test_helpers.py b/tests/test_helpers.py index fc808570d..2cb66ee39 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -88,7 +88,7 @@ def test_day_lengths(method): def test_cosine_of_solar_zenith_angle(): - time = xr.date_range("1900-01-01T00:30", "1900-01-03", freq="H") + time = xr.date_range("1900-01-01T00:30", "1900-01-03", freq="h") time = xr.DataArray(time, dims=("time",), coords={"time": time}, name="time") lat = xr.DataArray( [0, 45, 70], dims=("site",), name="lat", attrs={"units": "degree_north"} diff --git a/tests/test_indicators.py b/tests/test_indicators.py index 80ee3901f..d3026af45 100644 --- a/tests/test_indicators.py +++ b/tests/test_indicators.py @@ -816,7 +816,7 @@ def test_resampling_indicator_with_indexing(tas_series): np.testing.assert_allclose(out, [28, 29]) out = xclim.atmos.tx_days_above( - tas, thresh="0 degC", freq="AS-JUL", doy_bounds=(1, 50) + tas, thresh="0 degC", freq="YS-JUL", doy_bounds=(1, 50) ) np.testing.assert_allclose(out, [50, 50, np.NaN]) diff --git a/tests/test_indices.py b/tests/test_indices.py index 5a7e0eb5d..e6c7859cc 100644 --- a/tests/test_indices.py +++ b/tests/test_indices.py @@ -121,7 +121,7 @@ def test_simple(self, tas_series): a[80:100] -= 30 # at the end and beginning da = tas_series(a + K2C) - out = xci.cold_spell_days(da, thresh="-10. C", freq="M") + out = xci.cold_spell_days(da, thresh="-10. C", freq="ME") np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "d" @@ -135,7 +135,7 @@ def test_simple(self, tas_series): a[95:101] -= 30 da = tas_series(a + K2C, start="1971-01-01") - out = xci.cold_spell_frequency(da, thresh="-10. C", freq="M") + out = xci.cold_spell_frequency(da, thresh="-10. C", freq="ME") np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "" @@ -153,7 +153,7 @@ def test_simple(self, tas_series): a[95:101] -= 30 da = tas_series(a + K2C, start="1971-01-01") - out = xci.cold_spell_max_length(da, thresh="-10. C", freq="M") + out = xci.cold_spell_max_length(da, thresh="-10. C", freq="ME") np.testing.assert_array_equal(out, [10, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "d" @@ -171,7 +171,7 @@ def test_simple(self, tas_series): a[95:101] -= 30 da = tas_series(a + K2C, start="1971-01-01") - out = xci.cold_spell_total_length(da, thresh="-10. C", freq="M") + out = xci.cold_spell_total_length(da, thresh="-10. 
C", freq="ME") np.testing.assert_array_equal(out, [10, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "d" @@ -702,7 +702,7 @@ def test_simple(self, tasmin_series, tasmax_series, thresholds): mn = tasmin_series(mn + K2C) mx = tasmax_series(mx + K2C) out = xci.multiday_temperature_swing( - mn, mx, **thresholds, op="sum", window=1, freq="M" + mn, mx, **thresholds, op="sum", window=1, freq="ME" ) np.testing.assert_array_equal(out[:2], [5, 1]) np.testing.assert_array_equal(out[2:], 0) @@ -976,7 +976,7 @@ def test_southhemisphere(self, tas_series): tas = tas_series(np.zeros(2 * 365), start="2000/1/1") warm_period = tas.sel(time=slice("2000-11-01", "2001-03-01")) tas = tas.where(~tas.time.isin(warm_period.time), 280) - gsl = xci.growing_season_length(tas, mid_date="01-01", freq="AS-Jul") + gsl = xci.growing_season_length(tas, mid_date="01-01", freq="YS-JUL") np.testing.assert_array_equal(gsl.sel(time="2000-07-01"), 121) @@ -1088,7 +1088,7 @@ def test_southhemisphere(self, tasmin_series): tasmin = tasmin_series(np.zeros(2 * 365) + 270, start="2000/1/1") warm_period = tasmin.sel(time=slice("2000-11-01", "2001-03-01")) tasmin = tasmin.where(~tasmin.time.isin(warm_period.time), 300) - fsl = xci.frost_free_season_length(tasmin, freq="AS-JUL", mid_date="01-01") + fsl = xci.frost_free_season_length(tasmin, freq="YS-JUL", mid_date="01-01") np.testing.assert_array_equal(fsl.sel(time="2000-07-01"), 121) @@ -1120,7 +1120,7 @@ def test_simple(self, tasmax_series): a[80:100] += 30 # at the end and beginning da = tasmax_series(a + K2C) - out = xci.heat_wave_index(da, thresh="25 C", freq="M") + out = xci.heat_wave_index(da, thresh="25 C", freq="ME") np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -1500,7 +1500,7 @@ def test_simple(self, pr_series, tas_series): tas[14:] += 10 tas = tas_series(tas + K2C) - out = xci.liquid_precip_ratio(pr, tas=tas, freq="M") + out = xci.liquid_precip_ratio(pr, tas=tas, freq="ME") np.testing.assert_almost_equal(out[:1], [0.6]) @@ -1509,14 +1509,14 @@ def test_simple(self, pr_series): a = np.zeros(365) + 10 a[5:15] = 0 pr = pr_series(a) - out = xci.maximum_consecutive_dry_days(pr, freq="M") + out = xci.maximum_consecutive_dry_days(pr, freq="ME") assert out[0] == 10 def test_run_start_at_0(self, pr_series): a = np.zeros(365) + 10 a[:10] = 0 pr = pr_series(a) - out = xci.maximum_consecutive_dry_days(pr, freq="M") + out = xci.maximum_consecutive_dry_days(pr, freq="ME") assert out[0] == 10 @pytest.mark.parametrize( @@ -1531,7 +1531,7 @@ def test_resampling_order(self, pr_series, resample_before_rl, expected): a[5:35] = 0 pr = pr_series(a) out = xci.maximum_consecutive_dry_days( - pr, freq="M", resample_before_rl=resample_before_rl + pr, freq="ME", resample_before_rl=resample_before_rl ) assert out[0] == expected @@ -1541,7 +1541,7 @@ def test_simple(self, tasmax_series): a = np.zeros(365) + 273.15 a[5:15] += 30 tx = tasmax_series(a, start="1/1/2010") - out = xci.maximum_consecutive_tx_days(tx, thresh="25 C", freq="M") + out = xci.maximum_consecutive_tx_days(tx, thresh="25 C", freq="ME") assert out[0] == 10 np.testing.assert_array_almost_equal(out[1:], 0) @@ -1567,7 +1567,7 @@ def test_simple(self, pr_series): pr[5:10] = 1 pr = pr_series(pr) - out = xci.precip_accumulation(pr, freq="M") + out = xci.precip_accumulation(pr, freq="ME") np.testing.assert_array_equal(out[0], 5 * 3600 * 24) def test_yearly(self): @@ -1588,11 +1588,11 @@ def test_mixed_phases(self, pr_series, tas_series): tas[10:15] = 268 tas = tas_series(tas) - outsn = 
xci.precip_accumulation(pr, tas=tas, phase="solid", freq="M") + outsn = xci.precip_accumulation(pr, tas=tas, phase="solid", freq="ME") outsn2 = xci.precip_accumulation( - pr, tas=tas, phase="solid", thresh="269 K", freq="M" + pr, tas=tas, phase="solid", thresh="269 K", freq="ME" ) - outrn = xci.precip_accumulation(pr, tas=tas, phase="liquid", freq="M") + outrn = xci.precip_accumulation(pr, tas=tas, phase="liquid", freq="ME") np.testing.assert_array_equal(outsn[0], 10 * 3600 * 24) np.testing.assert_array_equal(outsn2[0], 5 * 3600 * 24) @@ -1620,7 +1620,7 @@ def test_simple(self, pr_series): pr[5:10] = 1 pr = pr_series(pr) - out = xci.precip_average(pr, freq="M") + out = xci.precip_average(pr, freq="ME") np.testing.assert_array_equal(out[0], 5 * 3600 * 24 / 31) def test_yearly(self): @@ -1639,11 +1639,11 @@ def test_mixed_phases(self, pr_series, tas_series): tas[10:15] = 268 tas = tas_series(tas) - outsn = xci.precip_average(pr, tas=tas, phase="solid", freq="M") + outsn = xci.precip_average(pr, tas=tas, phase="solid", freq="ME") outsn2 = xci.precip_average( - pr, tas=tas, phase="solid", thresh="269 K", freq="M" + pr, tas=tas, phase="solid", thresh="269 K", freq="ME" ) - outrn = xci.precip_average(pr, tas=tas, phase="liquid", freq="M") + outrn = xci.precip_average(pr, tas=tas, phase="liquid", freq="ME") np.testing.assert_array_equal(outsn[0], 10 * 3600 * 24 / 31) np.testing.assert_array_equal(outsn2[0], 5 * 3600 * 24 / 31) @@ -2301,7 +2301,7 @@ def test_calm_days(self, sfcWind_series): a[10:20] = 2 # non-calm day on default thres, but should count as calm in test a[40:50] = 3.1 # non-calm day on test threshold da = sfcWind_series(a) - out = xci.calm_days(da, thresh="3 km h-1", freq="M") + out = xci.calm_days(da, thresh="3 km h-1", freq="ME") np.testing.assert_array_equal(out, [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "d" @@ -2311,7 +2311,7 @@ def test_windy_days(self, sfcWind_series): a[40:50] = 12 # windy day on test threshold a[80:90] = 15 # windy days da = sfcWind_series(a) - out = xci.windy_days(da, thresh="12 km h-1", freq="M") + out = xci.windy_days(da, thresh="12 km h-1", freq="ME") np.testing.assert_array_equal(out, [0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert out.units == "d" @@ -2949,7 +2949,7 @@ def test_freezethaw_spell(tasmin_series, tasmax_series, op, exp): tasmin = tasmin_series(tmin + K2C) out = xci.multiday_temperature_swing( - tasmin=tasmin, tasmax=tasmax, freq="AS-JUL", window=3, op=op + tasmin=tasmin, tasmax=tasmax, freq="YS-JUL", window=3, op=op ) np.testing.assert_array_equal(out, exp) @@ -3351,7 +3351,7 @@ def test_simple(self, pr_series, prc_series): prc = prc_series(a_prc) prc.attrs["units"] = "mm/day" - out = xci.rprctot(pr, prc, thresh="5 mm/day", freq="M") + out = xci.rprctot(pr, prc, thresh="5 mm/day", freq="ME") np.testing.assert_allclose( out, [ @@ -3380,10 +3380,10 @@ def test_simple(self, pr_series): pr = pr_series(a) pr.attrs["units"] = "mm/day" - out = xci.wetdays(pr, thresh="5 mm/day", freq="M") + out = xci.wetdays(pr, thresh="5 mm/day", freq="ME") np.testing.assert_allclose(out, [5, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0]) - out = xci.wetdays(pr, thresh="5 mm/day", freq="M", op=">") + out = xci.wetdays(pr, thresh="5 mm/day", freq="ME", op=">") np.testing.assert_allclose(out, [4, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]) @@ -3396,10 +3396,10 @@ def test_simple(self, pr_series): pr = pr_series(a) pr.attrs["units"] = "mm/day" - out = xci.wetdays_prop(pr, thresh="5 mm/day", freq="M") + out = xci.wetdays_prop(pr, thresh="5 mm/day", freq="ME") 
np.testing.assert_allclose(out, [5 / 31, 0, 0, 3 / 31, 0, 0, 0, 0, 0, 0, 0, 0]) - out = xci.wetdays_prop(pr, thresh="5 mm/day", freq="M", op=">") + out = xci.wetdays_prop(pr, thresh="5 mm/day", freq="ME", op=">") np.testing.assert_allclose(out, [4 / 31, 0, 0, 2 / 31, 0, 0, 0, 0, 0, 0, 0, 0]) diff --git a/tests/test_locales.py b/tests/test_locales.py index 6f63977a3..89f074d7b 100644 --- a/tests/test_locales.py +++ b/tests/test_locales.py @@ -15,7 +15,7 @@ esperanto = ( "eo", { - "attrs_mapping": {"modifiers": ["adj"], "AS-*": ["jara"], "MS": ["monata"]}, + "attrs_mapping": {"modifiers": ["adj"], "YS-*": ["jara"], "MS": ["monata"]}, "TG_MEAN": { "long_name": "Meza ciutaga averaga temperaturo", "title": "Meza ciutaga averaga temperaturo", @@ -28,7 +28,7 @@ { "attrs_mapping": { "modifiers": ["nn", "nf"], - "AS-*": ["годовое", "годовая"], + "YS-*": ["годовое", "годовая"], "MS": ["месячный", "месячная"], }, "TG_MEAN": { @@ -97,8 +97,8 @@ def test_local_attrs_multi(tmp_path): def test_local_formatter(): fmt = xloc.get_local_formatter(russian) - assert fmt.format("{freq:nn}", freq="AS-JUL") == "годовое" - assert fmt.format("{freq:nf}", freq="AS-DEC") == "годовая" + assert fmt.format("{freq:nn}", freq="YS-JUL") == "годовое" + assert fmt.format("{freq:nf}", freq="YS-DEC") == "годовая" def test_indicator_output(tas_series): diff --git a/tests/test_missing.py b/tests/test_missing.py index f30c8ec84..197a8446d 100644 --- a/tests/test_missing.py +++ b/tests/test_missing.py @@ -17,25 +17,25 @@ class TestMissingBase: def test_3hourly_input(self, random): """Creating array with 21 days of 3h""" n = 21 * 8 - time = xr.cftime_range(start="2002-01-01", periods=n, freq="3H") + time = xr.cftime_range(start="2002-01-01", periods=n, freq="3h") ts = xr.DataArray(random.random(n), dims="time", coords={"time": time}) - mb = missing.MissingBase(ts, freq="MS", src_timestep="3H") + mb = missing.MissingBase(ts, freq="MS", src_timestep="3h") # Make sure count is 31 * 8, because we're requesting a MS freq. assert mb.count == 31 * 8 def test_monthly_input(self, random): """Creating array with 11 months.""" n = 11 - time = xr.cftime_range(start="2002-01-01", periods=n, freq="M") + time = xr.cftime_range(start="2002-01-01", periods=n, freq="ME") ts = xr.DataArray(random.random(n), dims="time", coords={"time": time}) - mb = missing.MissingBase(ts, freq="YS", src_timestep="M") + mb = missing.MissingBase(ts, freq="YS", src_timestep="ME") # Make sure count is 12, because we're requesting a YS freq. 
assert mb.count == 12 n = 5 time = xr.cftime_range(start="2002-06-01", periods=n, freq="MS") ts = xr.DataArray(random.random(n), dims="time", coords={"time": time}) - mb = missing.MissingBase(ts, freq="AS", src_timestep="M", season="JJA") + mb = missing.MissingBase(ts, freq="YS", src_timestep="MS", season="JJA") assert mb.count == 3 def test_seasonal_input(self, random): @@ -81,21 +81,21 @@ def test_missing_season(self): n = 378 times = pd.date_range("2001-12-31", freq="1D", periods=n) da = xr.DataArray(np.arange(n), [("time", times)]) - miss = missing.missing_any(da, "Q-NOV") + miss = missing.missing_any(da, "QE-NOV") np.testing.assert_array_equal(miss, [True, False, False, False, True]) def test_to_period_start(self, tasmin_series): a = np.zeros(365) + K2C + 5.0 a[2] -= 20 ts = tasmin_series(a) - miss = missing.missing_any(ts, freq="AS-JUL") + miss = missing.missing_any(ts, freq="YS-JUL") np.testing.assert_equal(miss, [False]) def test_to_period_end(self, tasmin_series): a = np.zeros(365) + K2C + 5.0 a[2] -= 20 ts = tasmin_series(a) - miss = missing.missing_any(ts, freq="A-JUN") + miss = missing.missing_any(ts, freq="YE-JUN") np.testing.assert_equal(miss, [False]) def test_month(self, tasmin_series): @@ -139,14 +139,14 @@ def test_no_freq(self, tasmin_series): t = list(range(31)) t.pop(5) ts2 = ts.isel(time=t) - miss = missing.missing_any(ts2, freq=None, src_timestep="H") + miss = missing.missing_any(ts2, freq=None, src_timestep="h") np.testing.assert_array_equal(miss, True) # With indexer miss = missing.missing_any(ts, freq=None, month=[7]) np.testing.assert_array_equal(miss, False) - miss = missing.missing_any(ts2, freq=None, month=[7], src_timestep="H") + miss = missing.missing_any(ts2, freq=None, month=[7], src_timestep="h") np.testing.assert_array_equal(miss, True) def test_hydro(self, open_dataset): @@ -264,7 +264,7 @@ def pr(self, pr_hr_series): def test_any(self, pr_hr_series): pr = self.pr(pr_hr_series) - out = missing.missing_any(pr, "D", src_timestep="H") + out = missing.missing_any(pr, "D", src_timestep="h") np.testing.assert_array_equal( out, [True] + 8 * [False] + [True], @@ -272,7 +272,7 @@ def test_any(self, pr_hr_series): def test_pct(self, pr_hr_series): pr = self.pr(pr_hr_series) - out = missing.missing_pct(pr, "D", src_timestep="H", tolerance=0.1) + out = missing.missing_pct(pr, "D", src_timestep="h", tolerance=0.1) np.testing.assert_array_equal( out, 9 * [False] + [True], @@ -280,7 +280,7 @@ def test_pct(self, pr_hr_series): def test_at_least_n_valid(self, pr_hr_series): pr = self.pr(pr_hr_series) - out = missing.at_least_n_valid(pr, "D", src_timestep="H", n=20) + out = missing.at_least_n_valid(pr, "D", src_timestep="h", n=20) np.testing.assert_array_equal( out, 9 * [False] + [True], diff --git a/tests/test_partitioning.py b/tests/test_partitioning.py index 070a49d2e..9862e4e60 100644 --- a/tests/test_partitioning.py +++ b/tests/test_partitioning.py @@ -34,7 +34,7 @@ def test_hawkins_sutton_synthetic(random): r = random.standard_normal((4, 13, 60)) x = r + mean[:, :, np.newaxis] - time = xr.date_range("1970-01-01", periods=60, freq="Y") + time = xr.date_range("1970-01-01", periods=60, freq="YE") da = xr.DataArray(x, dims=("scenario", "model", "time"), coords={"time": time}) m, v = hawkins_sutton(da) # Mean uncertainty over time @@ -87,7 +87,7 @@ def test_lafferty_sriver_synthetic(random): r = random.standard_normal((4, 13, 5, 60)) x = r + mean[:, :, :, np.newaxis] - time = xr.date_range("1970-01-01", periods=60, freq="Y") + time = xr.date_range("1970-01-01", 
periods=60, freq="YE") da = xr.DataArray( x, dims=("scenario", "model", "downscaling", "time"), coords={"time": time} ) diff --git a/tests/test_precip.py b/tests/test_precip.py index 17a1bacc0..8fc3250f7 100644 --- a/tests/test_precip.py +++ b/tests/test_precip.py @@ -40,7 +40,7 @@ def test_3d_data_with_nans(self, open_dataset): out = {} out["start"], out["end"], out["length"] = atmos.rain_season( pr, - freq="AS-JAN", + freq="YS-JAN", window_dry_end=5, date_min_start="01-01", date_min_end="01-01", @@ -575,7 +575,7 @@ def test_days_over_precip_thresh__seasonal_indexer(open_dataset): per = pr.quantile(0.8, "time", keep_attrs=True) # WHEN out = atmos.days_over_precip_thresh( - pr, per, freq="AS", date_bounds=("01-10", "12-31") + pr, per, freq="YS", date_bounds=("01-10", "12-31") ) # THEN np.testing.assert_almost_equal(out[0], np.array([81.0, 66.0, 66.0, 75.0])) diff --git a/tests/test_run_length.py b/tests/test_run_length.py index 012b740ea..7617c6f87 100644 --- a/tests/test_run_length.py +++ b/tests/test_run_length.py @@ -166,12 +166,12 @@ def test_simple(self): time = pd.date_range("7/1/2000", periods=len(values), freq="D") values[1:11] = 1 da = xr.DataArray(values != 0, coords={"time": time}, dims="time") - lt = da.resample(time="M").map(rl.rle_statistics, reducer="max", window=1) + lt = da.resample(time="ME").map(rl.rle_statistics, reducer="max", window=1) assert lt[0] == 10 np.testing.assert_array_equal(lt[1:], 0) # resample after - lt = rl.rle_statistics(da, freq="M", reducer="max", window=1, ufunc_1dim=False) + lt = rl.rle_statistics(da, freq="ME", reducer="max", window=1, ufunc_1dim=False) assert lt[0] == 10 np.testing.assert_array_equal(lt[1:], 0) @@ -180,12 +180,12 @@ def test_start_at_0(self): time = pd.date_range("7/1/2000", periods=len(values), freq="D") values[0:10] = 1 da = xr.DataArray(values != 0, coords={"time": time}, dims="time") - lt = da.resample(time="M").map(rl.rle_statistics, reducer="max", window=1) + lt = da.resample(time="ME").map(rl.rle_statistics, reducer="max", window=1) assert lt[0] == 10 np.testing.assert_array_equal(lt[1:], 0) # resample after - lt = rl.rle_statistics(da, freq="M", reducer="max", window=1, ufunc_1dim=False) + lt = rl.rle_statistics(da, freq="ME", reducer="max", window=1, ufunc_1dim=False) assert lt[0] == 10 np.testing.assert_array_equal(lt[1:], 0) @@ -195,12 +195,12 @@ def test_end_start_at_0(self): values[-10:] = 1 da = xr.DataArray(values != 0, coords={"time": time}, dims="time") - lt = da.resample(time="M").map(rl.rle_statistics, reducer="max", window=1) + lt = da.resample(time="ME").map(rl.rle_statistics, reducer="max", window=1) assert lt[-1] == 10 np.testing.assert_array_equal(lt[:-1], 0) # resample after - lt = rl.rle_statistics(da, freq="M", reducer="max", window=1, ufunc_1dim=False) + lt = rl.rle_statistics(da, freq="ME", reducer="max", window=1, ufunc_1dim=False) assert lt[-1] == 10 np.testing.assert_array_equal(lt[:-1], 0) @@ -209,11 +209,11 @@ def test_all_true(self): time = pd.date_range("7/1/2000", periods=len(values), freq="D") da = xr.DataArray(values != 0, coords={"time": time}, dims="time") - lt = da.resample(time="M").map(rl.rle_statistics, reducer="max", window=1) - np.testing.assert_array_equal(lt, da.resample(time="M").count(dim="time")) + lt = da.resample(time="ME").map(rl.rle_statistics, reducer="max", window=1) + np.testing.assert_array_equal(lt, da.resample(time="ME").count(dim="time")) # resample after - lt = rl.rle_statistics(da, freq="M", reducer="max", window=1, ufunc_1dim=False) + lt = rl.rle_statistics(da, 
freq="ME", reducer="max", window=1, ufunc_1dim=False) expected = np.zeros(12) expected[0] = 365 np.testing.assert_array_equal(lt, expected) @@ -225,13 +225,13 @@ def test_almost_all_true(self): time = pd.date_range("7/1/2000", periods=len(values), freq="D") da = xr.DataArray(values != 0, coords={"time": time}, dims="time") - lt = da.resample(time="M").map(rl.rle_statistics, reducer="max", window=1) - n = da.resample(time="M").count(dim="time") + lt = da.resample(time="ME").map(rl.rle_statistics, reducer="max", window=1) + n = da.resample(time="ME").count(dim="time") np.testing.assert_array_equal(lt[0], n[0]) np.testing.assert_array_equal(lt[1], 26) # resample after - lt = rl.rle_statistics(da, freq="M", reducer="max", window=1, ufunc_1dim=False) + lt = rl.rle_statistics(da, freq="ME", reducer="max", window=1, ufunc_1dim=False) expected = np.zeros(12) expected[0], expected[1] = 35, 365 - 35 - 1 np.testing.assert_array_equal(lt[0], expected[0]) @@ -304,7 +304,7 @@ def test_real_data(self, open_dataset): # FIXME: No test here?! # n-dim version versus ufunc da3d = open_dataset(self.nc_pr).pr[:, 40:50, 50:68] != 0 - da3d.resample(time="M").map(rl.first_run, window=5) + da3d.resample(time="ME").map(rl.first_run, window=5) @pytest.mark.parametrize( "coord,expected", @@ -617,28 +617,28 @@ def test_run_with_dates_different_calendars(self, calendar, expected): tas = xr.DataArray(tas, coords={"time": time}, dims=("time",)) out = ( (tas > 0) - .resample(time="AS-MAR") + .resample(time="YS-MAR") .map(rl.first_run_after_date, date="03-01", window=2) ) np.testing.assert_array_equal(out.values[1:], expected) out = ( (tas > 0) - .resample(time="AS-MAR") + .resample(time="YS-MAR") .map(rl.season_length, date="03-02", window=2) ) np.testing.assert_array_equal(out.values[1:], [250, 250]) out = ( (tas > 0) - .resample(time="AS-MAR") + .resample(time="YS-MAR") .map(rl.run_end_after_date, date="03-03", window=2) ) np.testing.assert_array_equal(out.values[1:], np.array(expected) + 250) out = ( (tas > 0) - .resample(time="AS-MAR") + .resample(time="YS-MAR") .map(rl.last_run_before_date, date="03-02", window=2) ) np.testing.assert_array_equal(out.values[1:], np.array(expected) + 1) diff --git a/tests/test_snow.py b/tests/test_snow.py index 2ed405a77..054f53a58 100644 --- a/tests/test_snow.py +++ b/tests/test_snow.py @@ -10,7 +10,7 @@ class TestSnowDepth: def test_simple(self, snd_series): snd = snd_series(np.ones(110), start="2001-01-01") - out = land.snow_depth(snd, freq="M") + out = land.snow_depth(snd, freq="ME") assert out.units == "cm" np.testing.assert_array_equal(out, [100, 100, 100, np.nan]) @@ -19,7 +19,7 @@ class TestSnowDepthCoverDuration: def test_simple(self, snd_series): snd = snd_series(np.ones(110), start="2001-01-01") - out = land.snd_season_length(snd, freq="M") + out = land.snd_season_length(snd, freq="ME") assert out.units == "days" np.testing.assert_array_equal(out, [31, 28, 31, np.nan]) @@ -30,7 +30,7 @@ class TestSnowWaterCoverDuration: ) def test_simple(self, snw_series, factor, exp): snw = snw_series(np.ones(110) * factor, start="2001-01-01") - out = land.snw_season_length(snw, freq="M") + out = land.snw_season_length(snw, freq="ME") assert out.units == "days" np.testing.assert_array_equal(out, exp) @@ -74,7 +74,7 @@ def test_simple(self, snd_series): a = np.zeros(365) a[200] = 1 snd = snd_series(a, start="2001-07-01") - out = land.snd_max_doy(snd, freq="AS-JUL") + out = land.snd_max_doy(snd, freq="YS-JUL") np.testing.assert_array_equal(out, snd.time.dt.dayofyear[200]) def test_units(self, 
tas_series, random): diff --git a/tests/test_temperature.py b/tests/test_temperature.py index 897decbc1..41a71865c 100644 --- a/tests/test_temperature.py +++ b/tests/test_temperature.py @@ -28,7 +28,7 @@ def test_simple(self, tasmin_series, random): tn = tasmin_series(tn) tn10 = percentile_doy(tn, per=10).sel(percentiles=10) - out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL") + out = atmos.cold_spell_duration_index(tn, tn10, freq="YS-JUL") assert out[0] == 10 def test_convert_units(self, tasmin_series, random): @@ -44,7 +44,7 @@ def test_convert_units(self, tasmin_series, random): tn.attrs["units"] = "C" tn10 = percentile_doy(tn, per=10).sel(percentiles=10) - out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL") + out = atmos.cold_spell_duration_index(tn, tn10, freq="YS-JUL") assert out[0] == 10 def test_nan_presence(self, tasmin_series, random): @@ -61,7 +61,7 @@ def test_nan_presence(self, tasmin_series, random): tn = tasmin_series(tn) tn10 = percentile_doy(tn, per=10).sel(percentiles=10) - out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL") + out = atmos.cold_spell_duration_index(tn, tn10, freq="YS-JUL") assert np.isnan(out[0]) @@ -371,7 +371,7 @@ def test_simple(self, tasmin_series): a[300:400] = K2C - 5 a[404:407] = K2C - 5 tasmin = tasmin_series(a, start="2000-01-01") - # Default, window = 5, mid_date = 07-01, freq= AS-JUL + # Default, window = 5, mid_date = 07-01, freq= YS-JUL out = atmos.frost_season_length(tasmin=tasmin) np.testing.assert_array_equal(out, [np.nan, 107, np.nan]) @@ -1144,7 +1144,7 @@ def test_tx90p__seasonal_indexer(self, tasmax_series): # create cold spell in june tas[175:180] = 1 # WHEN - out = atmos.tx90p(tas, t90, freq="AS", season="JJA") + out = atmos.tx90p(tas, t90, freq="YS", season="JJA") # THEN assert out[0] == 87 # non regression test @@ -1295,7 +1295,7 @@ def test_warm_spell_duration_index(self, open_dataset): tx90 = percentile_doy(tasmax, window=5, per=90) out = atmos.warm_spell_duration_index( - tasmax=tasmax, tasmax_per=tx90, window=3, freq="AS-JUL" + tasmax=tasmax, tasmax_per=tx90, window=3, freq="YS-JUL" ) np.testing.assert_array_equal( out.isel(location=0, percentiles=0), np.array([np.nan, 4, 0, 0, np.nan]) @@ -1489,7 +1489,7 @@ def test_simple(self, tas_series): tg = tas_series(a + K2C, start="1/1/2000") - out = atmos.cold_spell_frequency(tg, freq="AS") + out = atmos.cold_spell_frequency(tg, freq="YS") np.testing.assert_array_equal(out, 1) @@ -1500,7 +1500,7 @@ def test_simple(self, tas_series): tg = tas_series(a + K2C, start="1/1/2000") - out = atmos.cold_spell_max_length(tg, freq="AS") + out = atmos.cold_spell_max_length(tg, freq="YS") np.testing.assert_array_equal(out, 5) @@ -1511,5 +1511,5 @@ def test_simple(self, tas_series): tg = tas_series(a + K2C, start="1/1/2000") - out = atmos.cold_spell_total_length(tg, freq="AS") + out = atmos.cold_spell_total_length(tg, freq="YS") np.testing.assert_array_equal(out, 8) diff --git a/tests/test_units.py b/tests/test_units.py index 116e4c2a3..f83055082 100644 --- a/tests/test_units.py +++ b/tests/test_units.py @@ -210,7 +210,7 @@ def test_rate2amount(pr_series): with xr.set_options(keep_attrs=True): pr_ms = pr.resample(time="MS").mean() - pr_m = pr.resample(time="M").mean() + pr_m = pr.resample(time="ME").mean() am_ms = rate2amount(pr_ms) np.testing.assert_array_equal(am_ms[:4], 86400 * np.array([31, 28, 31, 30])) @@ -233,7 +233,7 @@ def test_amount2rate(pr_series): with xr.set_options(keep_attrs=True): am_ms = am.resample(time="MS").sum() - am_m = 
am.resample(time="M").sum() + am_m = am.resample(time="ME").sum() pr_ms = amount2rate(am_ms) np.testing.assert_allclose(pr_ms, 1) diff --git a/xclim/core/calendar.py b/xclim/core/calendar.py index e63d4ac21..6071f6ab3 100644 --- a/xclim/core/calendar.py +++ b/xclim/core/calendar.py @@ -270,7 +270,7 @@ def convert_doy( Name of the temporal dimension. """ source_cal = source_cal or source.attrs.get("calendar", get_calendar(source[dim])) - is_calyear = xr.infer_freq(source[dim]) in ("AS-JAN", "A-DEC") + is_calyear = xr.infer_freq(source[dim]) in ("YS-JAN", "Y-DEC", "YE-DEC") if is_calyear: # Fast path year_of_the_doy = source[dim].dt.year @@ -794,20 +794,21 @@ def parse_offset(freq: str) -> Sequence[str]: multiplier : int Multiplier of the base frequency. "[n]W" is always replaced with "[7n]D", as xarray doesn't support "W" for cftime indexes. offset_base : str - Base frequency. "Y" is always replaced with "A". + Base frequency. is_start_anchored : bool Whether coordinates of this frequency should correspond to the beginning of the period (`True`) or its end (`False`). - Can only be False when base is A, Q or M; in other words, xclim assumes frequencies finer than monthly are all start-anchored. + Can only be False when base is Y, Q or M; in other words, xclim assumes frequencies finer than monthly are all start-anchored. anchor : str or None - Anchor date for bases A or Q. As xarray doesn't support "W", neither does xclim (anchor information is lost when given). + Anchor date for bases Y or Q. As xarray doesn't support "W", neither does xclim (anchor information is lost when given). """ - # Useful to raise on invalid freqs, convert Y to A and get default anchor (A, Q) + # Useful to raise on invalid freqs and get the default anchor (Y, Q) offset = pd.tseries.frequencies.to_offset(freq) base, *anchor = offset.name.split("-") anchor = anchor[0] if len(anchor) > 0 else None - start = ("S" in base) or (base[0] not in "AQM") - base = base[0] + start = ("S" in base) or (base[0] not in "AYQM") + if base.endswith("S") or base.endswith("E"): + base = base[:-1] mult = offset.n if base == "W": mult = 7 * mult @@ -826,7 +827,7 @@ def construct_offset(mult: int, base: str, start_anchored: bool, anchor: str | N base : str The base period string (one char). start_anchored: bool - If True and base in [Y, A, Q, M], adds the "S" flag. + If base is in [Y, A, Q, M]: True adds the "S" flag, False adds the "E" flag. anchor: str, optional - The month anchor of the offset. Defaults to JAN for bases AS, Y and QS and to DEC for bases A and Q. + The month anchor of the offset. Defaults to JAN for start-anchored Y and Q bases and to DEC for end-anchored ones. @@ -839,7 +840,7 @@ def construct_offset(mult: int, base: str, start_anchored: bool, anchor: str | N ----- This provides the mirror opposite functionality of :py:func:`parse_offset`.
""" - start = "S" if start_anchored and base in "YAQM" else "" + start = ("S" if start_anchored else "E") if base in "YAQM" else "" if anchor is None and base in "AQY": anchor = "JAN" if start_anchored else "DEC" return ( @@ -868,7 +869,7 @@ def is_offset_divisor(divisor: str, offset: str): -------- >>> is_offset_divisor("QS-Jan", "YS") True - >>> is_offset_divisor("QS-DEC", "AS-JUL") + >>> is_offset_divisor("QS-DEC", "YS-JUL") False >>> is_offset_divisor("D", "M") True @@ -884,7 +885,16 @@ def is_offset_divisor(divisor: str, offset: str): offBs = pd.tseries.frequencies.to_offset(construct_offset(mB, bB, True, aB)) tB = pd.date_range("1970-01-01T00:00:00", freq=offBs, periods=13) - if bA in "WDHTLUN" or bB in "WDHTLUN": + if bA in ["W", "D", "h", "min", "s", "ms", "us", "ms"] or bB in [ + "W", + "D", + "h", + "min", + "s", + "ms", + "us", + "ms", + ]: # Simple length comparison is sufficient for submonthly freqs # In case one of bA or bB is > W, we test many to be sure. tA = pd.date_range("1970-01-01T00:00:00", freq=offAs, periods=13) @@ -1033,7 +1043,7 @@ def time_bnds( # noqa: C901 time : DataArray, Dataset, CFTimeIndex, DatetimeIndex, DataArrayResample or DatasetResample Object which contains a time index as a proxy representation for a period index. freq : str, optional - String specifying the frequency/offset such as 'MS', '2D', or '3T' + String specifying the frequency/offset such as 'MS', '2D', or '3min' If not given, it is inferred from the time index, which means that index must have at least three elements. precision : str, optional @@ -1051,28 +1061,24 @@ def time_bnds( # noqa: C901 Notes ----- xclim assumes that indexes for greater-than-day frequencies are "floored" down to a daily resolution. - For example, the coordinate "2000-01-31 00:00:00" with a "M" frequency is assumed to mean a period + For example, the coordinate "2000-01-31 00:00:00" with a "ME" frequency is assumed to mean a period going from "2000-01-01 00:00:00" to "2000-01-31 23:59:59.999999". Similarly, it assumes that daily and finer frequencies yield indexes pointing to the period's start. - So "2000-01-31 00:00:00" with a "3H" frequency, means a period going from "2000-01-31 00:00:00" to + So "2000-01-31 00:00:00" with a "3h" frequency, means a period going from "2000-01-31 00:00:00" to "2000-01-31 02:59:59.999999". """ if isinstance(time, (xr.DataArray, xr.Dataset)): time = time.indexes[time.name] elif isinstance(time, (DataArrayResample, DatasetResample)): - # TODO: Remove conditional when pinning xarray above 2023.5.0 - if hasattr(time, "_full_index"): # xr < 2023.5.0 - time = time._full_index - else: # xr >= 2023.5.0 - for grouper in time.groupers: - if "time" in grouper.dims: - time = grouper.group_as_index - break - else: - raise ValueError( - 'Got object resampled along another dimension than "time".' - ) + for grouper in time.groupers: + if "time" in grouper.dims: + time = grouper.group_as_index + break + else: + raise ValueError( + 'Got object resampled along another dimension than "time".' + ) if freq is None and hasattr(time, "freq"): freq = time.freq @@ -1086,27 +1092,27 @@ def time_bnds( # noqa: C901 # Normalizing without using `.normalize` because cftime doesn't have it floor = {"hour": 0, "minute": 0, "second": 0, "microsecond": 0, "nanosecond": 0} - if freq_base in "HTSLUN": # This is verbose, is there a better way? 
+ if freq_base in ["h", "min", "s", "ms", "us", "ns"]: floor.pop("hour") - if freq_base in "TSLUN": + if freq_base in ["min", "s", "ms", "us", "ns"]: floor.pop("minute") - if freq_base in "SLUN": + if freq_base in ["s", "ms", "us", "ns"]: floor.pop("second") - if freq_base in "UN": + if freq_base in ["us", "ns"]: floor.pop("microsecond") - if freq_base in "N": + if freq_base == "ns": floor.pop("nanosecond") if isinstance(time, xr.CFTimeIndex): period = xr.coding.cftime_offsets.to_offset(freq) is_on_offset = period.onOffset - eps = pd.Timedelta(precision or "1U").to_pytimedelta() + eps = pd.Timedelta(precision or "1us").to_pytimedelta() day = pd.Timedelta("1D").to_pytimedelta() floor.pop("nanosecond") # unsupported by cftime else: period = pd.tseries.frequencies.to_offset(freq) is_on_offset = period.is_on_offset - eps = pd.Timedelta(precision or "1N") + eps = pd.Timedelta(precision or "1ns") day = pd.Timedelta("1D") def shift_time(t): diff --git a/xclim/core/datachecks.py b/xclim/core/datachecks.py index f1dba1e5e..907b44ea9 100644 --- a/xclim/core/datachecks.py +++ b/xclim/core/datachecks.py @@ -24,12 +24,12 @@ def check_freq(var: xr.DataArray, freq: str | Sequence[str], strict: bool = True var : xr.DataArray Input array. freq : str or sequence of str - The expected temporal frequencies, using Pandas frequency terminology ({'A', 'M', 'D', 'H', 'T', 'S', 'L', 'U'}) + The expected temporal frequencies, using Pandas frequency terminology ({'Y', 'M', 'D', 'h', 'min', 's', 'ms', 'us'}) and multiples thereof. To test strictly for 'W', pass '7D' with `strict=True`. - This ignores the start flag and the anchor (ex: 'AS-JUL' will validate against 'Y'). + This ignores the start/end flag and the anchor (ex: 'YS-JUL' will validate against 'Y'). strict : bool - Whether multiples of the frequencies are considered invalid or not. With `strict` set to False, a '3H' series - will not raise an error if freq is set to 'H'. + Whether multiples of the frequencies are considered invalid or not. With `strict` set to False, a '3h' series + will not raise an error if freq is set to 'h'. Raises ------ @@ -98,7 +98,7 @@ def check_common_time(inputs: Sequence[xr.DataArray]): # Check if anchor is the same freq = freqs[0] base = parse_offset(freq)[1] - fmt = {"H": ":%M", "D": "%H:%M"} + fmt = {"h": ":%M", "D": "%H:%M"} if base in fmt: outs = {da.indexes["time"][0].strftime(fmt[base]) for da in inputs} if len(outs) > 1: diff --git a/xclim/core/formatting.py b/xclim/core/formatting.py index 4f9c7fd01..d3951fa95 100644 --- a/xclim/core/formatting.py +++ b/xclim/core/formatting.py @@ -113,12 +113,12 @@ def format_field(self, value, format_spec): The base values may be given using unix shell-like patterns: >>> fmt = AttrFormatter( - ... {"AS-*": ["annuel", "annuelle"], "MS": ["mensuel", "mensuelle"]}, + ... {"YS-*": ["annuel", "annuelle"], "MS": ["mensuel", "mensuelle"]}, ... ["m", "f"], ... ) >>> fmt.format( ... "La moyenne {freq:f} est faite sur un échantillon {src_timestep:m}", - ... freq="AS-JUL", + ... freq="YS-JUL", ... src_timestep="MS", ... 
) 'La moyenne annuelle est faite sur un échantillon mensuel' @@ -163,7 +163,7 @@ def _match_value(self, value): # Arguments to "freq" "D": ["daily", "days"], "YS": ["annual", "years"], - "AS-*": ["annual", "years"], + "YS-*": ["annual", "years"], "MS": ["monthly", "months"], "QS-*": ["seasonal", "seasons"], # Arguments to "indexer" diff --git a/xclim/core/indicator.py b/xclim/core/indicator.py index 1a8c26255..ca40d7ae3 100644 --- a/xclim/core/indicator.py +++ b/xclim/core/indicator.py @@ -1558,7 +1558,7 @@ class Daily(ResamplingIndicator): class Hourly(ResamplingIndicator): """Class for hourly inputs and resampling computes.""" - src_freq = "H" + src_freq = "h" base_registry["Indicator"] = Indicator diff --git a/xclim/core/locales.py b/xclim/core/locales.py index 705ab0881..574eceee0 100644 --- a/xclim/core/locales.py +++ b/xclim/core/locales.py @@ -15,7 +15,7 @@ "attrs_mapping": { "modifiers": ["", "f", "mpl", "fpl"], "YS": ["annuel", "annuelle", "annuels", "annuelles"], - "AS-*": ["annuel", "annuelle", "annuels", "annuelles"], + "YS-*": ["annuel", "annuelle", "annuels", "annuelles"], # ... and so on for other frequent parameters translation... }, "DTRVAR": { diff --git a/xclim/core/missing.py b/xclim/core/missing.py index 1eb95eebc..61914b8b5 100644 --- a/xclim/core/missing.py +++ b/xclim/core/missing.py @@ -51,7 +51,7 @@ "register_missing_method", ] -_np_timedelta64 = {"D": "timedelta64[D]", "H": "timedelta64[h]"} +_np_timedelta64 = {"D": "timedelta64[D]", "h": "timedelta64[h]"} class MissingBase: @@ -215,7 +215,7 @@ class MissingAny(MissingBase): Input array. freq: str Resampling frequency. - src_timestep: {"D", "H", "M"} + src_timestep: {"D", "h", "ME"} Expected input frequency. indexer: {dim: indexer, }, optional Time attribute and values over which to subset the array. For example, use season='DJF' to select winter @@ -295,11 +295,11 @@ def execute(cls, da, freq, src_timestep, options, indexer): raise ValueError( "MissingWMO can only be used with Monthly or longer frequencies." ) - obj = cls(da, "M", src_timestep, **indexer) + obj = cls(da, "ME", src_timestep, **indexer) miss = obj(**options) # Replace missing months by NaNs mda = miss.where(miss == 0) - return MissingAny(mda, freq, "M", **indexer)() + return MissingAny(mda, freq, "ME", **indexer)() def is_missing(self, null, count, nm=11, nc=5): from ..indices import ( @@ -334,7 +334,7 @@ class MissingPct(MissingBase): Resampling frequency. tolerance : float Fraction of missing values that are tolerated [0,1]. - src_timestep : {"D", "H"} + src_timestep : {"D", "h"} Expected input frequency. indexer : {dim: indexer, }, optional Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values, @@ -371,7 +371,7 @@ class AtLeastNValid(MissingBase): Resampling frequency. n : int Minimum of valid values required. - src_timestep : {"D", "H"} + src_timestep : {"D", "h"} Expected input frequency. indexer : {dim: indexer, }, optional Time attribute and values over which to subset the array.
For example, use season='DJF' to select winter diff --git a/xclim/core/units.py b/xclim/core/units.py index 185f6726a..85e7ce688 100644 --- a/xclim/core/units.py +++ b/xclim/core/units.py @@ -398,11 +398,6 @@ def cf_conversion(standard_name: str, conversion: str, direction: str) -> str | FREQ_UNITS = { - "N": "ns", - "L": "ms", - "S": "s", - "T": "min", - "H": "h", "D": "d", "W": "week", } @@ -448,7 +443,7 @@ def infer_sampling_units( multi, base, _, _ = parse_offset(freq) try: - out = multi, FREQ_UNITS[base] + out = multi, FREQ_UNITS.get(base, base) except KeyError as err: raise ValueError( f"Sampling frequency {freq} has no corresponding units." @@ -579,7 +574,7 @@ def _rate_and_amount_converter( ) from err if freq is not None: multi, base, start_anchor, _ = parse_offset(freq) - if base in ["M", "Q", "A"]: + if base in ["M", "Q", "A", "Y"]: start = time.indexes[dim][0] if not start_anchor: # Anchor is on the end of the period, subtract 1 period. diff --git a/xclim/data/anuclim.yml b/xclim/data/anuclim.yml index bdbde1308..b5845f174 100644 --- a/xclim/data/anuclim.yml +++ b/xclim/data/anuclim.yml @@ -15,7 +15,7 @@ references: ANUCLIM https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6) base: ResamplingIndicator indicators: P10_MeanTempWarmestQuarter: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: tg_mean_warmcold_quarter cf_attrs: @@ -25,7 +25,7 @@ indicators: parameters: op: warmest P11_MeanTempColdestQuarter: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: tg_mean_warmcold_quarter cf_attrs: @@ -35,7 +35,7 @@ indicators: parameters: op: coldest P12_AnnualPrecip: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: prcptot cf_attrs: @@ -45,7 +45,7 @@ indicators: units: mm context: hydro P13_PrecipWettestPeriod: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: prcptot_wetdry_period cf_attrs: @@ -56,7 +56,7 @@ indicators: op: wettest context: hydro P14_PrecipDriestPeriod: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: prcptot_wetdry_period cf_attrs: @@ -67,7 +67,7 @@ indicators: op: driest context: hydro P15_PrecipSeasonality: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: precip_seasonality cf_attrs: @@ -76,7 +76,7 @@ indicators: "The standard deviation of the precipitation estimates expressed as a percentage of the mean of those estimates." 
P16_PrecipWettestQuarter: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: prcptot_wetdry_quarter cf_attrs: @@ -86,7 +86,7 @@ indicators: parameters: op: wettest P17_PrecipDriestQuarter: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: prcptot_wetdry_quarter cf_attrs: @@ -97,7 +97,7 @@ indicators: op: driest P18_PrecipWarmestQuarter: src_freq: ['D', '7D', 'M'] - allowed_periods: [A] + allowed_periods: ["Y"] compute: prcptot_warmcold_quarter cf_attrs: standard_name: lwe_thickness_of_precipitation_amount @@ -107,7 +107,7 @@ indicators: op: warmest P19_PrecipColdestQuarter: src_freq: ['D', '7D', 'M'] - allowed_periods: [A] + allowed_periods: ["Y"] compute: prcptot_warmcold_quarter cf_attrs: standard_name: lwe_thickness_of_precipitation_amount @@ -116,7 +116,7 @@ indicators: parameters: op: coldest P1_AnnMeanTemp: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: tg_mean cf_attrs: @@ -125,7 +125,7 @@ indicators: long_name: Annual Mean Temperature standard_name: air_temperature P2_MeanDiurnalRange: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: daily_temperature_range cf_attrs: @@ -133,14 +133,14 @@ indicators: long_name: Mean Diurnal Range cell_methods: "time: range" P3_Isothermality: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: isothermality cf_attrs: cell_methods: "time: range" description: "The mean diurnal range (P2) divided by the Annual Temperature Range (P7)." P4_TempSeasonality: - allowed_periods: [A] + allowed_periods: ["Y"] src_freq: ['D', '7D', 'M'] compute: temperature_seasonality cf_attrs: @@ -150,7 +150,7 @@ indicators: For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to divide by zero, but it does mean that the values are usually quite small." 
@@ -150,7 +150,7 @@ indicators:
       For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to divide by zero, but it does mean that the values are usually quite small."
   P5_MaxTempWarmestPeriod:
-    allowed_periods: [A]
+    allowed_periods: ["Y"]
     src_freq: ['D', '7D', 'M']
     compute: tx_max
     cf_attrs:
@@ -160,7 +160,7 @@ indicators:
       units: K
       cell_methods: "time: maximum"
   P6_MinTempColdestPeriod:
-    allowed_periods: [A]
+    allowed_periods: ["Y"]
     src_freq: ['D', '7D', 'M']
     compute: tn_min
     cf_attrs:
@@ -170,7 +170,7 @@ indicators:
       units: K
       cell_methods: "time: minimum"
   P7_TempAnnualRange:
-    allowed_periods: [A]
+    allowed_periods: ["Y"]
     src_freq: ['D', '7D', 'M']
     compute: extreme_temperature_range
     input:
@@ -184,7 +184,7 @@ indicators:
       freq:
         default: YS
   P8_MeanTempWettestQuarter:
-    allowed_periods: [A]
+    allowed_periods: ["Y"]
     src_freq: ['D', '7D', 'M']
     compute: tg_mean_wetdry_quarter
     cf_attrs:
@@ -194,7 +194,7 @@ indicators:
     parameters:
       op: wettest
   P9_MeanTempDriestQuarter:
-    allowed_periods: [A]
+    allowed_periods: ["Y"]
     src_freq: ['D', '7D', 'M']
     compute: tg_mean_wetdry_quarter
     cf_attrs:
diff --git a/xclim/data/fr.json b/xclim/data/fr.json
index 2db49841e..5e76a0797 100644
--- a/xclim/data/fr.json
+++ b/xclim/data/fr.json
@@ -21,7 +21,7 @@
       "annuelles",
       "années"
     ],
-    "AS-*": [
+    "YS-*": [
       "annuel",
      "annuelle",
       "annuels",
diff --git a/xclim/data/schema.yml b/xclim/data/schema.yml
index 97aed6791..10bb25abd 100644
--- a/xclim/data/schema.yml
+++ b/xclim/data/schema.yml
@@ -9,7 +9,7 @@ variables: map(include('variable'), key=regex(r'^[\w]+$'), required=False)
 ---
 indicator:
   abstract: str(required=False)
-  allowed_periods: list(enum('A', 'Q', 'M', 'W'), required=False)
+  allowed_periods: list(enum('A', 'Y', 'Q', 'M', 'W'), required=False)
   src_freq: list(str(), required=False)
   base: str(required=False)
   compute: str(required=False)
diff --git a/xclim/indices/_agro.py b/xclim/indices/_agro.py
index a3ac3eebb..6e0ffe808 100644
--- a/xclim/indices/_agro.py
+++ b/xclim/indices/_agro.py
@@ -176,7 +176,7 @@ def huglin_index(
     end_date : DayOfYearStr
         The hemisphere-based start date to consider (north = October, south = April). This date is non-inclusive.
     freq : str
-        Resampling frequency (default: "YS"; For Southern Hemisphere, should be "AS-JUL").
+        Resampling frequency (default: "YS"; For Southern Hemisphere, should be "YS-JUL").

     Returns
     -------
@@ -351,7 +351,7 @@ def biologically_effective_degree_days(
     end_date : DayOfYearStr
         The hemisphere-based start date to consider (north = October, south = April). This date is non-inclusive.
     freq : str
-        Resampling frequency (default: "YS"; For Southern Hemisphere, should be "AS-JUL").
+        Resampling frequency (default: "YS"; For Southern Hemisphere, should be "YS-JUL").

     Returns
     -------
@@ -648,8 +648,8 @@ def dryness_index(
     :cite:cts:`tonietto_multicriteria_2004,riou_determinisme_1994`
     """
-    if parse_offset(freq) != (1, "A", True, "JAN"):
-        raise ValueError(f"Freq not allowed: {freq}. Must be `YS` or `AS-JAN`")
+    if parse_offset(freq) != (1, "Y", True, "JAN"):
+        raise ValueError(f"Freq not allowed: {freq}. Must be `YS` or `YS-JAN`")

     # Resample all variables to monthly totals in mm units.
     evspsblpot = (
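The `dryness_index` guard above accepts `YS` and `YS-JAN` interchangeably because pandas 2.2 resolves both aliases to the same January-anchored `YearBegin` offset, as can be checked directly::

    import pandas as pd
    from pandas.tseries.frequencies import to_offset

    assert to_offset("YS") == to_offset("YS-JAN")  # both are YearBegin(month=1)
    print(pd.date_range("2000-01-01", periods=2, freq="YS-JAN"))
    # DatetimeIndex(['2000-01-01', '2001-01-01'], dtype='datetime64[ns]', freq='YS-JAN')
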
@@ -717,9 +717,9 @@ def dryness_index(
     # Dryness index
     if has_north:
-        di_north = wo + (pr_masked - t_v - e_s).resample(time="AS-JAN").sum()
+        di_north = wo + (pr_masked - t_v - e_s).resample(time="YS-JAN").sum()
     if has_south:
-        di_south = wo + (pr_masked - t_v - e_s).resample(time="AS-JUL").sum()
+        di_south = wo + (pr_masked - t_v - e_s).resample(time="YS-JUL").sum()
         # Shift time for Southern Hemisphere to allow for concatenation with Northern Hemisphere
         di_south = di_south.shift(time=1).isel(time=slice(1, None))
         di_south["time"] = di_south.indexes["time"].shift(-6, "MS")
@@ -922,7 +922,7 @@ def rain_season(
     method_dry_end: str = "per_day",
     date_min_end: DayOfYearStr = "09-01",
     date_max_end: DayOfYearStr = "12-31",
-    freq="AS-JAN",
+    freq="YS-JAN",
 ):
     """Find the length of the rain season and the day of year of its start and its end.
diff --git a/xclim/indices/_hydrology.py b/xclim/indices/_hydrology.py
index aa8f8d728..3785157af 100644
--- a/xclim/indices/_hydrology.py
+++ b/xclim/indices/_hydrology.py
@@ -105,7 +105,7 @@ def rb_flashiness_index(q: xarray.DataArray, freq: str = "YS") -> xarray.DataArr


 @declare_units(snd="[length]")
-def snd_max(snd: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:
+def snd_max(snd: xarray.DataArray, freq: str = "YS-JUL") -> xarray.DataArray:
     """Maximum snow depth.

     The maximum daily snow depth.
@@ -126,7 +126,7 @@ def snd_max(snd: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:


 @declare_units(snd="[length]")
-def snd_max_doy(snd: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:
+def snd_max_doy(snd: xarray.DataArray, freq: str = "YS-JUL") -> xarray.DataArray:
     """Maximum snow depth day of year.

     Day of year when surface snow reaches its peak value. If snow depth is 0 over entire period, return NaN.
@@ -157,7 +157,7 @@ def snd_max_doy(snd: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray


 @declare_units(snw="[mass]/[area]")
-def snw_max(snw: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:
+def snw_max(snw: xarray.DataArray, freq: str = "YS-JUL") -> xarray.DataArray:
     """Maximum snow amount.

     The maximum daily snow amount.
@@ -178,7 +178,7 @@ def snw_max(snw: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:


 @declare_units(snw="[mass]/[area]")
-def snw_max_doy(snw: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray:
+def snw_max_doy(snw: xarray.DataArray, freq: str = "YS-JUL") -> xarray.DataArray:
     """Maximum snow amount day of year.

     Day of year when surface snow amount reaches its peak value. If snow amount is 0 over entire period, return NaN.
@@ -210,7 +210,7 @@ def snw_max_doy(snw: xarray.DataArray, freq: str = "AS-JUL") -> xarray.DataArray
 @declare_units(snw="[mass]/[area]")
 def snow_melt_we_max(
-    snw: xarray.DataArray, window: int = 3, freq: str = "AS-JUL"
+    snw: xarray.DataArray, window: int = 3, freq: str = "YS-JUL"
 ) -> xarray.DataArray:
     """Maximum snow melt.
@@ -244,7 +244,7 @@ def snow_melt_we_max(
 @declare_units(snw="[mass]/[area]", pr="[precipitation]")
 def melt_and_precip_max(
-    snw: xarray.DataArray, pr: xarray.DataArray, window: int = 3, freq: str = "AS-JUL"
+    snw: xarray.DataArray, pr: xarray.DataArray, window: int = 3, freq: str = "YS-JUL"
 ) -> xarray.DataArray:
     """Maximum snow melt and precipitation.
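The snow indices above default to `YS-JUL` so that a northern-hemisphere snow season, which straddles the calendar year, falls into a single resampling bin. A self-contained illustration with synthetic data (the values are arbitrary; only the bin edges matter)::

    import numpy as np
    import pandas as pd
    import xarray as xr

    time = pd.date_range("2000-07-01", "2002-06-30", freq="D")
    snd = xr.DataArray(
        np.abs(np.sin(np.arange(time.size) / 58)),  # arbitrary seasonal-ish signal
        coords={"time": time},
        dims="time",
    )
    # July-anchored years: one bin per snow season (starting 2000-07-01 and
    # 2001-07-01) instead of each winter being split across two calendar years.
    print(snd.resample(time="YS-JUL").max("time").time.values)
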
diff --git a/xclim/indices/_multivariate.py b/xclim/indices/_multivariate.py
index 0e75f7b5a..bb1769292 100644
--- a/xclim/indices/_multivariate.py
+++ b/xclim/indices/_multivariate.py
@@ -1816,7 +1816,7 @@ def blowing_snow(
     snd_thresh: Quantified = "5 cm",
     sfcWind_thresh: Quantified = "15 km/h",  # noqa
     window: int = 3,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     """Blowing snow days.
diff --git a/xclim/indices/_threshold.py b/xclim/indices/_threshold.py
index f7a3d29a2..6d3307d45 100644
--- a/xclim/indices/_threshold.py
+++ b/xclim/indices/_threshold.py
@@ -144,7 +144,7 @@ def cold_spell_days(
     tas: xarray.DataArray,
     thresh: Quantified = "-10 degC",
     window: int = 5,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = "<",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
@@ -203,7 +203,7 @@ def cold_spell_frequency(
     tas: xarray.DataArray,
     thresh: Quantified = "-10 degC",
     window: int = 5,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = "<",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
@@ -252,7 +252,7 @@ def cold_spell_max_length(
     tas: xarray.DataArray,
     thresh: Quantified = "-10 degC",
     window: int = 1,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = "<",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
@@ -300,7 +300,7 @@ def cold_spell_total_length(
     tas: xarray.DataArray,
     thresh: Quantified = "-10 degC",
     window: int = 3,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = "<",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
@@ -349,7 +349,7 @@ def snd_season_end(
     snd: xarray.DataArray,
     thresh: Quantified = "2 cm",
     window: int = 14,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""End date of continuous snow depth cover.
@@ -401,7 +401,7 @@ def snw_season_end(
     snw: xarray.DataArray,
     thresh: Quantified = "20.00 kg m-2",
     window: int = 14,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""End date of continuous snow water cover.
@@ -458,7 +458,7 @@ def snd_season_start(
     snd: xarray.DataArray,
     thresh: Quantified = "2 cm",
     window: int = 14,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Start date of continuous snow depth cover.
@@ -514,7 +514,7 @@ def snw_season_start(
     snw: xarray.DataArray,
     thresh: Quantified = "20.00 kg m-2",
     window: int = 14,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Start date of continuous snow water cover.
@@ -572,7 +572,7 @@ def snw_season_start(
 @declare_units(snd="[length]", thresh="[length]")
 def snd_storm_days(
-    snd: xarray.DataArray, thresh: Quantified = "25 cm", freq: str = "AS-JUL"
+    snd: xarray.DataArray, thresh: Quantified = "25 cm", freq: str = "YS-JUL"
 ) -> xarray.DataArray:
     """Days with snowfall over threshold.
@@ -614,7 +614,7 @@ def snd_storm_days(
 @declare_units(snw="[mass]/[area]", thresh="[mass]/[area]")
 def snw_storm_days(
-    snw: xarray.DataArray, thresh: Quantified = "10 kg m-2", freq: str = "AS-JUL"
+    snw: xarray.DataArray, thresh: Quantified = "10 kg m-2", freq: str = "YS-JUL"
 ) -> xarray.DataArray:
     """Days with snowfall over threshold.
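These default changes are invisible to callers who relied on the defaults, but any user code that spelled `freq="AS-JUL"` explicitly must be renamed the same way; the call is otherwise unchanged. For example (hypothetical random data, units attribute required by xclim)::

    import numpy as np
    import pandas as pd
    import xarray as xr
    from xclim.indices import cold_spell_days

    time = pd.date_range("2000-01-01", periods=731, freq="D")
    tas = xr.DataArray(
        250 + 30 * np.random.rand(time.size),
        coords={"time": time},
        dims="time",
        attrs={"units": "K"},
    )
    out = cold_spell_days(tas, thresh="-10 degC", window=5, freq="YS-JUL")  # was "AS-JUL"
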
@@ -1062,11 +1062,11 @@ def growing_season_length(
     For the Northern Hemisphere:

-    >>> gsl_nh = growing_season_length(tas, mid_date="07-01", freq="AS")
+    >>> gsl_nh = growing_season_length(tas, mid_date="07-01", freq="YS")

     If working in the Southern Hemisphere, one can use:

-    >>> gsl_sh = growing_season_length(tas, mid_date="01-01", freq="AS-JUL")
+    >>> gsl_sh = growing_season_length(tas, mid_date="01-01", freq="YS-JUL")

     References
     ----------
@@ -1091,7 +1091,7 @@ def frost_season_length(
     window: int = 5,
     mid_date: DayOfYearStr | None = "01-01",
     thresh: Quantified = "0.0 degC",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = "<",
 ) -> xarray.DataArray:
     r"""Frost season length.
@@ -1148,7 +1148,7 @@ def frost_season_length(
     For the Northern Hemisphere:

-    >>> fsl_nh = frost_season_length(tasmin, freq="AS-JUL")
+    >>> fsl_nh = frost_season_length(tasmin, freq="YS-JUL")

     If working in the Southern Hemisphere, one can use:

@@ -1335,7 +1335,7 @@ def frost_free_season_length(
     If working in the Southern Hemisphere, one can use:

-    >>> ffsl_sh = frost_free_season_length(tasmin, freq="AS-JUL")
+    >>> ffsl_sh = frost_free_season_length(tasmin, freq="YS-JUL")
     """
     thresh = convert_units_to(thresh, tasmin)
     cond = compare(tasmin, op, thresh, constrain=(">=", ">"))
@@ -1354,7 +1354,7 @@ def frost_free_spell_max_length(
     tasmin: xarray.DataArray,
     thresh: Quantified = "0.0 degC",
     window: int = 1,
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = ">=",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
@@ -1569,7 +1569,7 @@ def first_day_temperature_above(
 def first_snowfall(
     prsn: xarray.DataArray,
     thresh: Quantified = "1 mm/day",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""First day with snowfall rate above a threshold.
@@ -1623,7 +1623,7 @@ def first_snowfall(
 def last_snowfall(
     prsn: xarray.DataArray,
     thresh: Quantified = "1 mm/day",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Last day with snowfall above a threshold.
@@ -1683,7 +1683,7 @@ def days_with_snow(
     prsn: xarray.DataArray,
     low: Quantified = "0 kg m-2 s-1",
     high: Quantified = "1E6 kg m-2 s-1",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Days with snow.
@@ -1728,7 +1728,7 @@ def snowfall_frequency(
     prsn: xarray.DataArray,
     thresh: Quantified = "1 mm/day",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Percentage of snow days.
@@ -1780,7 +1780,7 @@ def snowfall_intensity(
     prsn: xarray.DataArray,
     thresh: Quantified = "1 mm/day",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
 ) -> xarray.DataArray:
     r"""Mean daily snowfall rate during snow days.
@@ -2099,7 +2099,7 @@ def hot_spell_frequency(
 def snd_season_length(
     snd: xarray.DataArray,
     thresh: Quantified = "2 cm",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = ">=",
 ) -> xarray.DataArray:
     """The number of days with snow depth above a threshold.
@@ -2136,7 +2136,7 @@ def snw_season_length(
     snw: xarray.DataArray,
     thresh: Quantified = "20.00 kg m-2",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     op: str = ">=",
 ) -> xarray.DataArray:
     """The number of days with snow water above a threshold.
@@ -2582,7 +2582,7 @@ def wetdays_prop(
 def maximum_consecutive_frost_days(
     tasmin: xarray.DataArray,
     thresh: Quantified = "0.0 degC",
-    freq: str = "AS-JUL",
+    freq: str = "YS-JUL",
     resample_before_rl: bool = True,
 ) -> xarray.DataArray:
     r"""Maximum number of consecutive frost days (Tn < 0℃).
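The docstring examples above switch between `YS` and `YS-JUL`: the two aliases take the same yearly step and differ only in the anchor month, as pandas 2.2 shows directly::

    import pandas as pd

    print(pd.date_range("2000-01-01", periods=3, freq="YS"))
    # ['2000-01-01', '2001-01-01', '2002-01-01']
    print(pd.date_range("2000-01-01", periods=3, freq="YS-JUL"))
    # ['2000-07-01', '2001-07-01', '2002-07-01']
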
@@ -3040,7 +3040,7 @@ def _exceedance_date(grp):
 @deprecated(from_version="0.46.0", suggested="snd_storm_days")
 @declare_units(snd="[length]", thresh="[length]")
 def winter_storm(
-    snd: xarray.DataArray, thresh: Quantified = "25 cm", freq: str = "AS-JUL"
+    snd: xarray.DataArray, thresh: Quantified = "25 cm", freq: str = "YS-JUL"
 ) -> xarray.DataArray:
     """Days with snowfall over threshold.
diff --git a/xclim/indices/generic.py b/xclim/indices/generic.py
index f552eb5b3..de8b364d5 100644
--- a/xclim/indices/generic.py
+++ b/xclim/indices/generic.py
@@ -118,11 +118,11 @@ def doymin(da: xr.DataArray) -> xr.DataArray:

 def default_freq(**indexer) -> str:
     """Return the default frequency."""
-    freq = "AS-JAN"
+    freq = "YS-JAN"
     if indexer:
         group, value = indexer.popitem()
         if group == "season":
-            month = 12  # The "season" scheme is based on AS-DEC
+            month = 12  # The "season" scheme is based on YS-DEC
         elif group == "month":
             month = np.take(value, 0)
         elif group == "doy_bounds":
@@ -131,7 +131,7 @@ def default_freq(**indexer) -> str:
             month = int(value[0][:2])
         else:
             raise ValueError(f"Unknown group `{group}`.")
-        freq = "AS-" + _MONTH_ABBREVIATIONS[month]
+        freq = "YS-" + _MONTH_ABBREVIATIONS[month]
     return freq
diff --git a/xclim/indices/run_length.py b/xclim/indices/run_length.py
index 855f4e4c3..d0ce12c6e 100644
--- a/xclim/indices/run_length.py
+++ b/xclim/indices/run_length.py
@@ -1352,7 +1352,7 @@ def _index_from_1d_array(indices, array):
     index = index.fillna(0).astype(int)

     # for each chunk of index, take corresponding values from da
-    da2 = da.rename("__placeholder__")
+    da2 = da  # .rename("__placeholder__")
     out = index.map_blocks(_index_from_1d_array, args=(da2,)).rename(da.name)
     # mask where index was NaN. Drop any auxiliary coord, they are already on `out`.
     # Chunked aux coord would have the same name on both sides and xarray will want to check if they are equal, which means loading them
@@ -1364,7 +1364,7 @@ def _index_from_1d_array(indices, array):
     )
     if idx_ndim == 0:
         # 0-D case, drop useless coords and dummy dim
-        out = out.drop_vars(da.dims[0]).squeeze()
+        out = out.drop_vars(da.dims[0], errors="ignore").squeeze()
         return out.drop_vars(dim or da.dims[0], errors="ignore")

     # Case where index.dims is a subset of da.dims.
diff --git a/xclim/indices/stats.py b/xclim/indices/stats.py
index 7a91e89bc..8fb845196 100644
--- a/xclim/indices/stats.py
+++ b/xclim/indices/stats.py
@@ -396,7 +396,7 @@ def frequency_analysis(
         Averaging window length (days).
     freq : str, optional
         Resampling frequency. If None, the frequency is assumed to be 'YS' unless the indexer is season='DJF',
-        in which case `freq` would be set to `AS-DEC`.
+        in which case `freq` would be set to `YS-DEC`.
     method : {"ML" or "MLE", "MOM", "PWM", "APP"}
         Fitting method, either maximum likelihood (ML or MLE), method of moments (MOM), probability weighted moments (PWM), also called L-Moments, or approximate method (APP).
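`default_freq` is what turns an indexer such as `season="DJF"` into the `YS-DEC` mentioned in the `frequency_analysis` docstring above. A condensed, runnable sketch of that mapping (only the season branch is shown; the month table is an excerpt for illustration)::

    _MONTH_ABBREVIATIONS = {1: "JAN", 12: "DEC"}  # excerpt

    def default_freq(**indexer) -> str:
        freq = "YS-JAN"
        if indexer:
            group, _ = indexer.popitem()
            if group == "season":
                # The "season" scheme is based on YS-DEC: DJF opens the year.
                freq = "YS-" + _MONTH_ABBREVIATIONS[12]
        return freq

    assert default_freq() == "YS-JAN"
    assert default_freq(season="DJF") == "YS-DEC"
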
diff --git a/xclim/sdba/processing.py b/xclim/sdba/processing.py
index 742877ed8..76404fbd4 100644
--- a/xclim/sdba/processing.py
+++ b/xclim/sdba/processing.py
@@ -474,11 +474,11 @@ def _get_number_of_elements_by_year(time):
     mult, freq, _, _ = parse_offset(xr.infer_freq(time))
     days_in_year = max_doy[cal]
-    elements_in_year = {"Q": 4, "M": 12, "D": days_in_year, "H": days_in_year * 24}
+    elements_in_year = {"Q": 4, "M": 12, "D": days_in_year, "h": days_in_year * 24}
     N_in_year = elements_in_year.get(freq, 1) / mult
     if N_in_year % 1 != 0:
         raise ValueError(
-            f"Sampling frequency of the data must be Q, M, D or H and evenly divide a year (got {mult}{freq})."
+            f"Sampling frequency of the data must be Q, M, D or h and evenly divide a year (got {mult}{freq})."
         )
     return int(N_in_year)
diff --git a/xclim/sdba/utils.py b/xclim/sdba/utils.py
index f3d523967..77518212a 100644
--- a/xclim/sdba/utils.py
+++ b/xclim/sdba/utils.py
@@ -24,7 +24,6 @@
 MULTIPLICATIVE = "*"
 ADDITIVE = "+"
-loffsets = {"MS": "14d", "M": "15d", "YS": "181d", "Y": "182d", "QS": "45d", "Q": "46d"}


 def _ecdf_1d(x, value):
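In `_get_number_of_elements_by_year`, only the hourly key needs renaming ("H" becomes "h"); the arithmetic is unchanged. A condensed sketch of the count (`n_in_year` is a hypothetical standalone version, assuming a 365-day noleap year for simplicity)::

    elements_in_year = {"Q": 4, "M": 12, "D": 365, "h": 365 * 24}

    def n_in_year(mult: int, base: str) -> int:
        n = elements_in_year.get(base, 1) / mult
        if n % 1 != 0:
            raise ValueError(
                "Sampling frequency of the data must be Q, M, D or h "
                f"and evenly divide a year (got {mult}{base})."
            )
        return int(n)

    assert n_in_year(1, "M") == 12
    assert n_in_year(6, "h") == 1460  # 6-hourly data
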