diff --git a/.git_archival.txt b/.git_archival.txt
index 95cb3ee..7c51009 100644
--- a/.git_archival.txt
+++ b/.git_archival.txt
@@ -1 +1,3 @@
-ref-names: $Format:%D$
+node: $Format:%H$
+node-date: $Format:%cI$
+describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
diff --git a/.gitignore b/.gitignore
index a07556d..592a023 100644
--- a/.gitignore
+++ b/.gitignore
@@ -102,7 +102,6 @@ venv.bak/
# mypy
.mypy_cache/
-/.idea/sonarlint/*
-/src/zfit_physics/_version.py
+/.idea/
/tests/tfpwa/data/
/src/zfit_physics/_version.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fb0985b..e1f51be 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,12 +19,11 @@ repos:
- id: fix-byte-order-marker
- id: check-ast
-# - repo: https://github.com/PyCQA/docformatter
-# rev: v1.7.5
-# hooks:
-# - id: docformatter
-# args: [ -r, --in-place, --wrap-descriptions, '120', --wrap-summaries, '120', -- ]
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.13.2
+ hooks:
+ - id: isort
- repo: local
hooks:
- id: doc arg replacer
@@ -38,22 +37,21 @@ repos:
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
hooks:
- - id: python-use-type-annotations
- - id: python-check-mock-methods
- - id: python-no-eval
- - id: rst-directive-colons
- - repo: https://github.com/PyCQA/isort
- rev: 5.13.2
- hooks:
- - id: isort
+ - id: python-use-type-annotations
+ - id: python-check-mock-methods
+ - id: python-no-eval
+ - id: rst-directive-colons
+
+
+
- repo: https://github.com/asottile/pyupgrade
- rev: v3.17.0
+ rev: v3.18.0
hooks:
- id: pyupgrade
args:
- --py39-plus
- repo: https://github.com/asottile/setup-cfg-fmt
- rev: v2.5.0
+ rev: v2.7.0
hooks:
- id: setup-cfg-fmt
args:
@@ -71,11 +69,11 @@ repos:
args:
- --py39-plus
- repo: https://github.com/mgedmin/check-manifest
- rev: '0.49'
+ rev: '0.50'
hooks:
- id: check-manifest
- stages:
- - manual
+ args:
+ - --update
- repo: https://github.com/sondrelg/pep585-upgrade
rev: v1.0
hooks:
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 5c2ddb8..132856c 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -10,6 +10,7 @@ Major Features and Improvements
- add a RooFit compatibility layer and automatically convert losses, also inside minimizers (through ``SimpleLoss.from_any``)
- `TF-PWA `_ support for loss functions. Minimizer can directly minimize the loss function of a model.
- `pyhf `_ support for loss functions. Minimizer can directly minimize the loss function of a model.
+- `ComPWA `_ support for loss functions and PDFs. Minimizer can directly minimize the loss function of a model.
Breaking changes
------------------
diff --git a/docs/api/static/zfit_physics.compwa.rst b/docs/api/static/zfit_physics.compwa.rst
new file mode 100644
index 0000000..04532bf
--- /dev/null
+++ b/docs/api/static/zfit_physics.compwa.rst
@@ -0,0 +1,57 @@
+ComPWA
+=======================
+
+`ComPWA `_ is a framework for the coherent amplitude analysis of multi-body decays. It uses a symbolic approach to describe the decay amplitudes and can be used to fit data to extract the decay parameters. ComPWA can be used in combination with zfit to perform the fit by either creating a zfit pdf from the ComPWA model or by using the ComPWA estimator as a loss function for the zfit minimizer.
+
+Import the module with:
+
+.. code-block:: python
+
+ import zfit_physics.compwa as zcompwa
+
+This enables a :py:class:`~tensorwaves.estimator.Estimator` to be used as a loss function in zfit minimizers:
+
+.. code-block:: python
+
+ minimizer.minimize(loss=estimator)
+
+More explicitly, the loss function can be created with
+
+.. code-block:: python
+
+ nll = zcompwa.loss.nll_from_estimator(estimator)
+
+which optionally takes already created :py:class:`~zfit.core.interfaces.ZfitParameter` objects via the ``params`` argument.
+
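+For example, to reuse zfit parameters that were created beforehand (a minimal sketch; the parameter name ``"m_{f_{0}(980)}"`` is only an illustrative placeholder and has to match a parameter of the estimator):
+
+.. code-block:: python
+
+    import zfit
+
+    mass = zfit.Parameter("m_{f_{0}(980)}", 0.98)  # hypothetical parameter, name must exist in the estimator
+    nll = zcompwa.loss.nll_from_estimator(estimator, params=[mass])
+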
+A whole ComPWA model can be converted to a zfit pdf, together with a phase-space sample used for normalization, with
+
+.. code-block:: python
+
+   pdf = zcompwa.pdf.ComPWAPDF(compwa_model, norm=phsp_sample)
+
+``pdf`` is a full-fledged zfit PDF that can be used in the same way as any other zfit PDF: in a sum, a product, a convolution, and of course to fit data.
+
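+For instance, a fit could look like this (a minimal sketch; ``data_frame`` is assumed to be a ``pandas.DataFrame`` of kinematic variables matching the PDF observables):
+
+.. code-block:: python
+
+    import zfit
+
+    data = zfit.Data.from_pandas(data_frame, obs=pdf.space)
+    nll = zfit.loss.UnbinnedNLL(model=pdf, data=data)
+    result = zfit.minimize.Minuit().minimize(nll)
+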
+Variables
+++++++++++++
+
+
+.. automodule:: zfit_physics.compwa.variables
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+PDF
+++++++++++++
+
+.. automodule:: zfit_physics.compwa.pdf
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Loss
+++++++++++++
+
+.. automodule:: zfit_physics.compwa.loss
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/pyproject.toml b/pyproject.toml
index 28d1768..b9979c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,10 +38,15 @@ dynamic = ["version"]
[project.optional-dependencies]
tfpwa = ["tfpwa@git+https://github.com/jiangyi15/tf-pwa"]
+compwa = [
+ "qrules",
+ "ampform",
+ "tensorwaves[phsp]",
+]
pyhf = [
"pyhf",
]
-all = ["zfit-physics[tfpwa,pyhf]"]
+all = ["zfit-physics[tfpwa,pyhf,compwa]"]
test = [
"pytest",
"pytest-cov",
@@ -50,6 +55,8 @@ test = [
"zfit-physics[all]",
"contextlib_chdir", # backport of chdir from Python 3.11
]
+
+
dev = [
"bumpversion>=0.5.3",
"coverage>=4.5.1",
@@ -110,6 +117,14 @@ report.exclude_also = [
'if typing.TYPE_CHECKING:',
]
+[tool.check-manifest]
+ignore = [
+ ".tox/*",
+ "*/test*",
+ "*/__init__.py",
+ "*/_version.py",
+]
+
[tool.mypy]
files = ["src", "tests"]
python_version = "3.8"
@@ -171,6 +186,7 @@ ignore = [
"PLW2901", # "for loop overwritten by assignment" -> we use this to update the loop variable
"PD013", # "melt over stack": df function, but triggers on tensors
"NPY002", # "Use rnd generator in numpy" -> we use np.random for some legacy stuff but do use the new one where we can
+ "T201", # "print used" -> we use print for displaying information in verbose mode
]
isort.required-imports = ["from __future__ import annotations"]
diff --git a/src/zfit_physics/compwa/__init__.py b/src/zfit_physics/compwa/__init__.py
new file mode 100644
index 0000000..44c08ce
--- /dev/null
+++ b/src/zfit_physics/compwa/__init__.py
@@ -0,0 +1,3 @@
+from . import data, loss, pdf, variables
+
+__all__ = ["pdf", "variables", "loss"]
diff --git a/src/zfit_physics/compwa/data.py b/src/zfit_physics/compwa/data.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/zfit_physics/compwa/loss.py b/src/zfit_physics/compwa/loss.py
new file mode 100644
index 0000000..0232a1e
--- /dev/null
+++ b/src/zfit_physics/compwa/loss.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+import warnings
+from typing import TYPE_CHECKING
+
+import zfit
+from zfit.util.container import convert_to_container
+
+from .variables import params_from_intensity
+
+if TYPE_CHECKING:
+ from tensorwaves.estimator import Estimator
+ from zfit.core.interfaces import ZfitLoss
+
+__all__ = ["nll_from_estimator"]
+
+
+def nll_from_estimator(estimator: Estimator, *, params=None, errordef=None, numgrad=None) -> ZfitLoss:
+ r"""Create a negative log-likelihood function from a tensorwaves estimator.
+
+ Args:
+ estimator: An estimator object that computes a scalar loss function.
+ params: A list of zfit parameters that the loss function depends on.
+ errordef: The error definition of the loss function.
+        numgrad: If True, the gradient of the loss function is computed numerically and the ComPWA estimator's
+           gradient method is not used. This can be useful as not all backends in ComPWA support gradients.
+
+ Returns:
+ A zfit loss function that can be used with zfit.
+
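+    Example (a minimal sketch; ``estimator`` is assumed to be a tensorwaves ``UnbinnedNLL`` created beforehand)::
+
+        nll = nll_from_estimator(estimator)
+        result = zfit.minimize.Minuit().minimize(nll)
+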
+ """
+ from tensorwaves.estimator import ChiSquared, UnbinnedNLL
+
+ if params is None:
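+        # tensorwaves stores the intensity as a name-mangled private attribute on the estimator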
+ classname = estimator.__class__.__name__
+ intensity = getattr(estimator, f"_{classname}__function", None)
+ if intensity is None:
+ msg = f"Could not find intensity function in {estimator}. Maybe the attribute changed?"
+ raise ValueError(msg)
+ params = params_from_intensity(intensity)
+ else:
+ params = convert_to_container(params)
+
+ paramnames = [param.name for param in params]
+
+ def func(params):
+ paramdict = dict(zip(paramnames, params))
+ return estimator(paramdict)
+
+ if numgrad:
+ grad = None
+ else:
+
+ def grad(params):
+ paramdict = dict(zip(paramnames, params))
+ return estimator.gradient(paramdict)
+
+ if errordef is None:
+ if hasattr(estimator, "errordef"):
+ errordef = estimator.errordef
+ elif isinstance(estimator, ChiSquared):
+ errordef = 1.0
+ elif isinstance(estimator, UnbinnedNLL):
+ errordef = 0.5
+ return zfit.loss.SimpleLoss(func=func, gradient=grad, params=params, errordef=errordef)
+
+
+def _nll_from_estimator_or_false(estimator: Estimator, *, params=None, errordef=None) -> ZfitLoss | bool:
+ if "tensorwaves" in repr(type(estimator)):
+ try:
+ import tensorwaves as tw
+ except ImportError:
+ return False
+ if not isinstance(estimator, (tw.estimator.ChiSquared, tw.estimator.UnbinnedNLL)):
+ warnings.warn(
+ "Only ChiSquared and UnbinnedNLL are supported from tensorwaves currently."
+ f"TensorWaves is in name of {estimator}, this could be a bug.",
+ stacklevel=2,
+ )
+ return False
+ return nll_from_estimator(estimator, params=params, errordef=errordef)
+ return None
+
+
+zfit.loss.SimpleLoss.register_convertable_loss(_nll_from_estimator_or_false)
diff --git a/src/zfit_physics/compwa/pdf.py b/src/zfit_physics/compwa/pdf.py
new file mode 100644
index 0000000..463435a
--- /dev/null
+++ b/src/zfit_physics/compwa/pdf.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import tensorflow as tf
+import zfit # suppress tf warnings
+import zfit.z.numpy as znp
+from zfit import supports, z
+
+from .variables import obs_from_frame, params_from_intensity
+
+__all__ = ["ComPWAPDF"]
+
+
+class ComPWAPDF(zfit.pdf.BasePDF):
+ def __init__(self, intensity, norm, obs=None, params=None, extended=None, name="ComPWA"):
+ """ComPWA intensity normalized over the *norm* dataset."""
+ if params is None:
+ params = {p.name: p for p in params_from_intensity(intensity)}
+ norm = zfit.Data(norm, obs=obs)
+ if obs is None:
+ obs = obs_from_frame(norm.to_pandas())
+ norm = norm.with_obs(obs)
+ super().__init__(obs, params=params, name=name, extended=extended, autograd_params=[])
+ self.intensity = intensity
+ norm = {ob: znp.array(ar) for ob, ar in zip(self.obs, z.unstack_x(norm))}
+ self.norm_sample = norm
+
+ @supports(norm=True)
+ def _pdf(self, x, norm, params):
+ paramvalsfloat = []
+ paramvalscomplex = []
+ iscomplex = []
+ # we need to split complex and floats to pass them to the numpy function, as it creates a tensor
+ for val in params.values():
+ if val.dtype == znp.complex128:
+ iscomplex.append(True)
+ paramvalscomplex.append(val)
+ paramvalsfloat.append(znp.zeros_like(val, dtype=znp.float64))
+ else:
+ iscomplex.append(False)
+ paramvalsfloat.append(val)
+ paramvalscomplex.append(znp.zeros_like(val, dtype=znp.complex128))
+
+ def unnormalized_pdf_helper(x, paramvalsfloat, paramvalscomplex):
+ data = {ob: znp.array(ar) for ob, ar in zip(self.obs, x)}
+ paramsinternal = {
+ n: c if isc else f for n, f, c, isc in zip(params.keys(), paramvalsfloat, paramvalscomplex, iscomplex)
+ }
+ self.intensity.update_parameters(paramsinternal)
+ return self.intensity(data)
+
+ xunstacked = z.unstack_x(x)
+
+ probs = tf.numpy_function(
+ unnormalized_pdf_helper, [xunstacked, paramvalsfloat, paramvalscomplex], Tout=tf.float64
+ )
+ if norm is not False:
+ normvalues = [znp.asarray(self.norm_sample[ob]) for ob in self.obs]
+ normval = (
+ znp.mean(
+ tf.numpy_function(
+ unnormalized_pdf_helper, [normvalues, paramvalsfloat, paramvalscomplex], Tout=tf.float64
+ )
+ )
+ * znp.array([1.0]) # HACK: ComPWA just uses 1 as the phase space volume, better solution?
+            # norm.volume is very small: as it's done now (autoconverting in init), there are variables like
+            # masses that span only a tiny range, so the volume is very small
+ # * norm.volume
+ )
+ normval.set_shape((1,))
+ probs /= normval
+ probs.set_shape([None])
+ return probs
+
+ # @z.function(wraps="tensorwaves")
+ # def _jitted_normalization(self, norm, params):
+ # return znp.mean(self._jitted_unnormalized_pdf(norm, params=params))
diff --git a/src/zfit_physics/compwa/variables.py b/src/zfit_physics/compwa/variables.py
new file mode 100644
index 0000000..c48088d
--- /dev/null
+++ b/src/zfit_physics/compwa/variables.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+
+import numpy as np
+import pandas as pd
+import zfit
+from zfit.core.interfaces import ZfitUnbinnedData
+
+__all__ = ["obs_from_frame", "params_from_intensity"]
+
+
+def params_from_intensity(intensity):
+ return [
+ zfit.param.convert_to_parameter(val, name=name, prefer_constant=False)
+ for name, val in intensity.parameters.items()
+ ]
+
+
+def obs_from_frame(frame1, frame2=None, bufferfactor=0.01):
+ obs = []
+ if frame2 is None:
+ frame2 = frame1
+
+ if isinstance(frame1, ZfitUnbinnedData) or isinstance(frame2, ZfitUnbinnedData):
+ return frame1.space
+
+ if not isinstance(frame1, (Mapping, pd.DataFrame)) or not isinstance(frame2, (Mapping, pd.DataFrame)):
+ msg = "frame1 and frame2 have to be either a mapping or a pandas DataFrame, or a zfit Data object. They are currently of type: "
+ raise ValueError(
+ msg,
+ type(frame1),
+ type(frame2),
+ )
+ for ob in frame2:
+ minimum = np.min([np.min(frame1[ob]), np.min(frame2[ob])])
+ maximum = np.max([np.max(frame1[ob]), np.max(frame2[ob])])
+ dist = maximum - minimum
+ buffer = bufferfactor * dist
+ obs.append(
+ zfit.Space(
+ ob,
+ limits=(
+ minimum - buffer,
+ maximum + buffer,
+ ),
+ )
+ )
+ return zfit.dimension.combine_spaces(*obs)
diff --git a/tests/compwa/test_compwa_wrapper.py b/tests/compwa/test_compwa_wrapper.py
new file mode 100644
index 0000000..074adc7
--- /dev/null
+++ b/tests/compwa/test_compwa_wrapper.py
@@ -0,0 +1,194 @@
+from __future__ import annotations
+
+import numpy as np
+import pandas as pd
+import pytest
+
+import zfit_physics.compwa as zcompwa
+
+
+# @pytest.fixture()
+def create_amplitude():
+ import qrules
+
+ reaction = qrules.generate_transitions(
+ initial_state=("J/psi(1S)", [-1, +1]),
+ final_state=["gamma", "pi0", "pi0"],
+ allowed_intermediate_particles=["f(0)"],
+ allowed_interaction_types=["strong", "EM"],
+ formalism="helicity",
+ )
+
+ import ampform
+ from ampform.dynamics.builder import (
+ create_non_dynamic_with_ff, create_relativistic_breit_wigner_with_ff)
+
+ model_builder = ampform.get_builder(reaction)
+ model_builder.scalar_initial_state_mass = True
+ model_builder.stable_final_state_ids = [0, 1, 2]
+ model_builder.set_dynamics("J/psi(1S)", create_non_dynamic_with_ff)
+ for name in reaction.get_intermediate_particles().names:
+ model_builder.set_dynamics(name, create_relativistic_breit_wigner_with_ff)
+ model = model_builder.formulate()
+
+
+
+ return model, reaction
+
+
+def test_wrapper_simple_compwa():
+ import zfit
+
+ model, reaction = create_amplitude()
+
+ from tensorwaves.function.sympy import create_parametrized_function
+
+ unfolded_expression = model.expression.doit()
+ intensity_func = create_parametrized_function(
+ expression=unfolded_expression,
+ parameters=model.parameter_defaults,
+ backend="tensorflow",
+ )
+
+ from tensorwaves.data import SympyDataTransformer
+
+ helicity_transformer = SympyDataTransformer.from_sympy(
+ model.kinematic_variables, backend="numpy"
+ )
+ from tensorwaves.data import (IntensityDistributionGenerator,
+ TFPhaseSpaceGenerator,
+ TFUniformRealNumberGenerator,
+ TFWeightedPhaseSpaceGenerator)
+
+ rng = TFUniformRealNumberGenerator(seed=0)
+ phsp_generator = TFPhaseSpaceGenerator(
+ initial_state_mass=reaction.initial_state[-1].mass,
+ final_state_masses={i: p.mass for i, p in reaction.final_state.items()},
+ )
+ phsp_momenta = phsp_generator.generate(100_000, rng)
+
+ weighted_phsp_generator = TFWeightedPhaseSpaceGenerator(
+ initial_state_mass=reaction.initial_state[-1].mass,
+ final_state_masses={i: p.mass for i, p in reaction.final_state.items()},
+ )
+ data_generator = IntensityDistributionGenerator(
+ domain_generator=weighted_phsp_generator,
+ function=intensity_func,
+ domain_transformer=helicity_transformer,
+ )
+ data_momenta = data_generator.generate(10_000, rng)
+
+ phsp = helicity_transformer(phsp_momenta)
+ data = helicity_transformer(data_momenta)
+ data_frame = pd.DataFrame(data)
+ phsp_frame = pd.DataFrame(phsp)
+
+ initial_parameters = {
+ R"C_{J/\psi(1S) \to {f_{0}(1500)}_{0} \gamma_{+1}; f_{0}(1500) \to \pi^{0}_{0} \pi^{0}_{0}}": (
+ 1.0
+ ),
+ "m_{f_{0}(500)}": 0.4,
+ "m_{f_{0}(980)}": 0.88,
+ "m_{f_{0}(1370)}": 1.22,
+ "m_{f_{0}(1500)}": 1.45,
+ "m_{f_{0}(1710)}": 1.83,
+ R"\Gamma_{f_{0}(500)}": 0.3,
+ R"\Gamma_{f_{0}(980)}": 0.1,
+ R"\Gamma_{f_{0}(1710)}": 0.3,
+ }
+
+ free_parameter_symbols = [
+ symbol
+ for symbol in model.parameter_defaults
+ if symbol.name in set(initial_parameters)
+ ] # TODO, use this?
+
+ # TODO: cached doesn't really work, but needed?
+ # cached_intensity_func, transform_to_cache = create_cached_function(
+ # unfolded_expression,
+ # parameters=model.parameter_defaults,
+ # free_parameters=free_parameter_symbols,
+ # backend="jax",
+ # )
+ # cached_data = transform_to_cache(data)
+ # cached_phsp = transform_to_cache(phsp)
+
+ # data conversion
+ # phsp_zfit = zfit.Data.from_pandas(phsp_frame)
+ # data_zfit = zfit.Data.from_pandas(data_frame)
+ # data_frame = data_frame.astype(np.float64)
+ # phsp_frame = phsp_frame.astype(np.float64)
+ intensity = intensity_func
+
+ pdf = zcompwa.pdf.ComPWAPDF(
+ intensity=intensity,
+ norm=pd.DataFrame(phsp).astype(np.float64), # there are complex numbers in the norm
+ )
+
+ # pdf = zcompwa.pdf.ComPWAPDF(
+ # intensity=intensity,
+ # norm=phsp_frame,
+ # )
+
+ from tensorwaves.estimator import UnbinnedNLL
+
+ estimator = UnbinnedNLL(
+ intensity_func,
+ data=data,
+ phsp=phsp,
+ backend="tensorflow",
+ )
+
+
+ loss = zfit.loss.UnbinnedNLL(pdf, data_frame, options={'numgrad': True})
+
+    # cannot convert; cannot compare to the ComPWA gradient as it's not available or errors out
+ # np.testing.assert_allclose(loss.gradient(), estimator.gradient(initial_parameters), rtol=1e-5)
+
+ minimizer = zfit.minimize.Minuit(verbosity=7, gradient=True)
+ # minimizer = zfit.minimize.Minuit(verbosity=7, gradient='zfit')
+ # minimizer = zfit.minimize.ScipyLBFGSBV1(verbosity=8)
+ # minimizer = zfit.minimize.ScipyBFGS(verbosity=9)
+ # minimizer = zfit.minimize.ScipyTrustKrylovV1(verbosity=8)
+ # minimizer = zfit.minimize.NLoptMMAV1(verbosity=9)
+ # minimizer = zfit.minimize.IpyoptV1(verbosity=8)
+ params = loss.get_params()
+ paramsfit = [p for p in params
+ if p.name in initial_parameters
+ or p.name.endswith('_REALPART') and p.name[:-9] in initial_parameters # if complex, parts are indep
+ or p.name.endswith('_IMAGPART') and p.name[:-9] in initial_parameters]
+ nll_estimator = zcompwa.loss.nll_from_estimator(estimator, numgrad=True)
+ _ = nll_estimator.value()
+ # TODO: works but is slow
+ gradient_est = nll_estimator.gradient(list(nll_estimator.get_params())[:2])
+ assert not any(np.isnan(gradient_est))
+ gradient_zfit = loss.gradient(list(loss.get_params())[:2])
+ assert not any(np.isnan(gradient_zfit))
+ # np.testing.assert_allclose(gradient_est, gradient_zfit, rtol=1e-5)
+
+ from tensorwaves.optimizer import Minuit2
+
+ minuit2 = Minuit2(
+ use_analytic_gradient=False,
+ )
+ fit_result = minuit2.optimize(estimator, initial_parameters)
+ # print(fit_result)
+
+ with zfit.param.set_values(params, params):
+ result = minimizer.minimize(loss, params=paramsfit)
+ # print(result)
+ # TODO: test values? But ComPWA has bad values
+ # for p in paramsfit:
+ # if p.name.endswith('_REALPART') or p.name.endswith('_IMAGPART'):
+ # continue
+ # if p.name not in initial_parameters:
+ # print(f'Not in initial, ERROR: {p.name}')
+ # continue
+ # comp = fit_result.parameter_values[p.name]
+ # print(f"{p.name}, diff {p - comp}: {p.numpy()}, {comp}")
+ result.hesse()
+ # print(result)
+ assert result.valid
+ tol = 0.05 # 10% of 1 sigma
+    assert result.fmin - tol < fit_result.estimator_value  # ComPWA doesn't minimize well; if this fails, we can relax the tolerance
+ assert pytest.approx(result.fmin, abs=0.5) == fit_result.estimator_value
diff --git a/tests/roofit/test_loss_compat.py b/tests/roofit/test_loss_compat.py
new file mode 100644
index 0000000..8b9db51
--- /dev/null
+++ b/tests/roofit/test_loss_compat.py
@@ -0,0 +1,67 @@
+import numpy as np
+import pytest
+
+
+def test_loss_registry():
+ _ = pytest.importorskip("ROOT")
+ # Copyright (c) 2024 zfit
+
+ import zfit
+
+ import zfit_physics.roofit as zroofit
+
+ # create space
+ obs = zfit.Space("x", -2, 3)
+
+ # parameters
+ mu = zfit.Parameter("mu", 1.2, -4, 6)
+ sigma = zfit.Parameter("sigma", 1.3, 0.5, 10)
+
+ # model building, pdf creation
+ gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
+
+ # data
+ ndraw = 10_000
+ data = np.random.normal(loc=2.0, scale=3.0, size=ndraw)
+ data = obs.filter(data) # works also for pandas DataFrame
+
+ from ROOT import RooArgSet, RooDataSet, RooFit, RooGaussian, RooRealVar
+
+ mur = RooRealVar("mu", "mu", 1.2, -4, 6)
+ sigmar = RooRealVar("sigma", "sigma", 1.3, 0.5, 10)
+ obsr = RooRealVar("x", "x", -2, 3)
+ gaussr = RooGaussian("gauss", "gauss", obsr, mur, sigmar)
+
+ datar = RooDataSet("data", "data", {obsr})
+ for d in data:
+ obsr.setVal(d)
+ datar.add(RooArgSet(obsr))
+
+ # create a loss function
+ nll = gaussr.createNLL(datar)
+
+ nllz = zfit.loss.UnbinnedNLL(model=gauss, data=data)
+
+ # create a minimizer
+ tol = 1e-3
+ verbosity = 0
+ minimizer = zfit.minimize.Minuit(gradient=True, verbosity=verbosity, tol=tol, mode=1)
+ minimizerzgrad = zfit.minimize.Minuit(gradient=False, verbosity=verbosity, tol=tol, mode=1)
+
+ params = nllz.get_params()
+ initvals = np.array(params)
+
+ with zfit.param.set_values(params, initvals):
+ result = minimizer.minimize(nllz)
+
+ with zfit.param.set_values(params, initvals):
+ result2 = minimizer.minimize(nll)
+
+ assert result.params['mu']['value'] == pytest.approx(result2.params['mu']['value'], rel=1e-3)
+ assert result.params['sigma']['value'] == pytest.approx(result2.params['sigma']['value'], rel=1e-3)
+
+ with zfit.param.set_values(params, params):
+ result4 = minimizerzgrad.minimize(nll)
+
+ assert result.params['mu']['value'] == pytest.approx(result4.params['mu']['value'], rel=1e-3)
+ assert result.params['sigma']['value'] == pytest.approx(result4.params['sigma']['value'], rel=1e-3)
diff --git a/tests/tfpwa/test_basic_example_tfpwa.py b/tests/tfpwa/test_basic_example_tfpwa.py
index 2d94673..bd686ea 100644
--- a/tests/tfpwa/test_basic_example_tfpwa.py
+++ b/tests/tfpwa/test_basic_example_tfpwa.py
@@ -59,7 +59,7 @@ def test_example1_tfpwa():
# Set init paramters. If not set, we will use random initial parameters
config.set_params(str(this_dir / "gen_params.json"))
- with chdir(this_dir):
+ with chdir(this_dir): # needed for TF-PWA
fcn = config.get_fcn()
nll = ztfpwa.loss.nll_from_fcn(fcn)