Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remaining bugs with duplicate nodes, add grid developer guide #409

Merged
merged 11 commits into from
Feb 8, 2023
51 changes: 46 additions & 5 deletions desc/compute/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import numpy as np
from termcolor import colored

from desc.backend import jnp
from desc.backend import jnp, put
from desc.grid import ConcentricGrid

from .data_index import data_index
Expand Down Expand Up @@ -442,6 +442,9 @@ def _get_grid_surface(grid, surface_label):
The differential elements (dtheta * dzeta for rho surface).
max_surface_val : float
The supremum of this surface_label.
has_endpoint_dupe : bool
Whether this surface label's nodes have a duplicate at the endpoint
of a periodic domain. (e.g. nodes at both 0 and 2pi).

"""
assert surface_label in {"rho", "theta", "zeta"}
Expand All @@ -459,9 +462,15 @@ def _get_grid_surface(grid, surface_label):
nodes = grid.nodes[:, 2]
unique_idx = grid.unique_zeta_idx
ds = grid.spacing[:, :2].prod(axis=1)
max_surface_val = 2 * jnp.pi
max_surface_val = 2 * jnp.pi / grid.NFP

return nodes, unique_idx, ds, max_surface_val
assert nodes[unique_idx[-1]] <= max_surface_val
has_endpoint_dupe = (
surface_label != "rho"
and nodes[unique_idx[0]] == 0
and nodes[unique_idx[-1]] == max_surface_val
)
return nodes, unique_idx, ds, max_surface_val, has_endpoint_dupe


def compress(grid, x, surface_label="rho"):
Expand Down Expand Up @@ -563,9 +572,12 @@ def surface_integrals(grid, q=jnp.array([1]), surface_label="rho", max_surface=F
)

q = jnp.atleast_1d(q)
nodes, unique_idx, ds, max_surface_val = _get_grid_surface(grid, surface_label)
nodes, unique_idx, ds, max_surface_val, has_endpoint_dupe = _get_grid_surface(
grid, surface_label
)

if max_surface:
# assumes surface label is zeta, does a line integral
max_rho = grid.nodes[grid.unique_rho_idx[-1], 0]
idx = np.nonzero(grid.nodes[:, 0] == max_rho)[0]
q = q[idx]
Expand All @@ -578,7 +590,36 @@ def surface_integrals(grid, q=jnp.array([1]), surface_label="rho", max_surface=F
# Each is assigned a weight of their contribution to the integral.
# The elements of each bin are summed, performing the integration.
bins = jnp.append(nodes[unique_idx], max_surface_val)
integrals = jnp.histogram(nodes, bins=bins, weights=ds * q)[0]

# to group duplicate nodes together
nodes_modulo = nodes % max_surface_val if has_endpoint_dupe else nodes
# Longer explanation:
# Imagine a torus cross-section at zeta=pi.
# A grid with a duplicate zeta=pi node has 2 of those cross-sections.
# In grid.py, we multiply by 1/n the areas of surfaces with
# duplicity n. This prevents the area of that surface from being
# double-counted, as surfaces with the same node value are combined
# into 1 integral, which sums their areas. Thus, if the zeta=pi
# cross-section has duplicity 2, we ensure that the area on the zeta=pi
# surface will have the correct total area of pi+pi = 2pi.
# An edge case exists if the duplicate surface has nodes with
# different values for the surface label, which only occurs when
# has_endpoint_dupe is true. If has_endpoint_dupe is true, this grid
# has a duplicate surface at surface_label=0 and
# surface_label=max_surface_val. Although the modulo of these values
# are equal, their numeric values are not, so jnp.histogram would treat
# them as different surfaces. We solve this issue by modulating
# 'nodes', so that the duplicate surface has nodes with the same
# surface label and is treated as one, like in the previous paragraph.

integrals = jnp.histogram(nodes_modulo, bins=bins, weights=ds * q)[0]
if has_endpoint_dupe:
# By modulating nodes, we 'moved' all the area from
# surface_label=max_surface_val to the surface_label=0 surface.
# With the correct value of the integral on this surface stored
# in integrals[0], we copy it to the duplicate surface at integrals[-1].
assert integrals[-1] == 0
integrals = put(integrals, -1, integrals[0])
return expand(grid, integrals, surface_label)


Expand Down
60 changes: 42 additions & 18 deletions desc/grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,9 +161,10 @@ def _scale_weights(self, dtheta_scale):
# duplicates nodes are scaled down properly regardless of which two columns
# span the surface.

# scale areas sum to full area
# The following operation is not a general solution to return the weight
# removed from the duplicate nodes back to the unique nodes.
# For this reason, duplicates should typically be deleted rather that rescaled.
# For this reason, duplicates should typically be deleted rather than rescaled.
# Note we multiply each column by duplicates^(1/6) to account for the extra
# division by duplicates^(1/2) in one of the columns above.
self._spacing *= (
Expand All @@ -186,7 +187,9 @@ def _create_nodes(self, nodes):
Node spacing, in (rho,theta,zeta).

"""
nodes = np.atleast_2d(nodes).reshape((-1, 3))
nodes = np.atleast_2d(nodes).reshape((-1, 3)).astype(float)
nodes[nodes[:, 1] > 2 * np.pi, 1] %= 2 * np.pi
nodes[nodes[:, 2] > 2 * np.pi / self.NFP, 2] %= 2 * np.pi / self.NFP
spacing = ( # make weights sum to 4pi^2
np.ones_like(nodes) * np.array([1, 2 * np.pi, 2 * np.pi]) / nodes.shape[0]
)
Expand Down Expand Up @@ -342,6 +345,7 @@ class LinearGrid(Grid):
endpoint : bool
If True, theta=0 and zeta=0 are duplicated after a full period.
Should be False for use with FFT. (Default = False).
This boolean is ignored if an array is given for theta or zeta.
rho : ndarray of float, optional
Radial coordinates (Default = 1.0).
theta : ndarray of float, optional
Expand Down Expand Up @@ -423,6 +427,7 @@ def _create_nodes( # noqa: C901
endpoint : bool
If True, theta=0 and zeta=0 are duplicated after a full period.
Should be False for use with FFT. (Default = False).
This boolean is ignored if an array is given for theta or zeta.
rho : int or ndarray of float, optional
Radial coordinates (Default = 1.0).
Alternatively, the number of radial coordinates (if an integer).
Expand Down Expand Up @@ -451,7 +456,8 @@ def _create_nodes( # noqa: C901
# choose dr such that each node has the same weight
dr = 1 / r.size * np.ones_like(r)
else:
r = np.atleast_1d(rho)
# need to sort to compute correct spacing
r = np.sort(np.atleast_1d(rho))
dr = np.zeros_like(r)
if r.size > 1:
# choose dr such that cumulative sums of dr[] are node midpoints
Expand All @@ -472,16 +478,19 @@ def _create_nodes( # noqa: C901
self._M = len(np.atleast_1d(theta))
if np.isscalar(theta) and (int(theta) == theta) and theta > 0:
t = np.linspace(0, 2 * np.pi, int(theta), endpoint=endpoint)
if self.sym:
if self.sym and t.size > 1:
t += t[1] / 2
dt = 2 * np.pi / t.size * np.ones_like(t)
if endpoint and t.size > 1:
if (endpoint and not self.sym) and t.size > 1:
# increase node weight to account for duplicate node
dt *= t.size / (t.size - 1)
# scale_weights() will reduce endpoint (dt[0] and dt[-1])
# duplicate node weight
else:
t = np.atleast_1d(theta)
t = np.atleast_1d(theta).astype(float)
# need to sort to compute correct spacing
t[t > 2 * np.pi] %= 2 * np.pi
t = np.sort(t)
dt = np.zeros_like(t)
if t.size > 1:
# choose dt to be half the cyclic distance of the surrounding two nodes
Expand All @@ -492,13 +501,21 @@ def _create_nodes( # noqa: C901
dt /= 2
if t.size == 2:
dt[-1] = dt[0]
if endpoint:
if t[0] == 0 and t[-1] == SUP:
# The cyclic distance algorithm above correctly weights
# the duplicate node spacing at theta = 0 and 2pi.
# However, scale_weights() is not aware of this, so we multiply
# by 2 to counteract the reduction that will be done there.
dt[0] *= 2
dt[-1] *= 2
# the duplicate endpoint node spacing at theta = 0 and 2pi
# to be half the weight of the other nodes.
if not self.sym:
# However, scale_weights() is not aware of this, so we
# counteract the reduction that will be done there.
dt[0] += dt[-1]
dt[-1] = dt[0]
else:
# Symmetry deletion will delete the duplicate node at 2pi.
# Need to move weight from non-duplicate nodes back to the
# node at theta = 0.
dt[0] += dt[-1]
dt *= (t.size - 1) / t.size
else:
dt = np.array([2 * np.pi])

Expand All @@ -519,7 +536,10 @@ def _create_nodes( # noqa: C901
# scale_weights() will reduce endpoint (dz[0] and dz[-1])
# duplicate node weight
else:
z = np.atleast_1d(zeta)
z = np.atleast_1d(zeta).astype(float)
# need to sort to compute correct spacing
z[z > 2 * np.pi / self.NFP] %= 2 * np.pi / self.NFP
z = np.sort(z)
dz = np.zeros_like(z)
if z.size > 1:
# choose dz to be half the cyclic distance of the surrounding two nodes
Expand All @@ -531,16 +551,20 @@ def _create_nodes( # noqa: C901
dz *= self.NFP
if z.size == 2:
dz[-1] = dz[0]
if endpoint:
if z[0] == 0 and z[-1] == SUP:
# The cyclic distance algorithm above correctly weights
# the duplicate node spacing at zeta = 0 and 2pi / NFP.
# However, _scale_weights() is not aware of this, so we multiply
# by 2 to counteract the reduction that will be done there.
dz[0] *= 2
dz[-1] *= 2
# However, scale_weights() is not aware of this, so we
# counteract the reduction that will be done there.
dz[0] += dz[-1]
dz[-1] = dz[0]
else:
dz = np.array([2 * np.pi])

self._endpoint = (t[0] == 0 and t[-1] == 2 * np.pi) and (
z[0] == 0 and z[-1] == 2 * np.pi / self.NFP
)

r, t, z = np.meshgrid(r, t, z, indexing="ij")
r = r.flatten()
t = t.flatten()
Expand Down
1 change: 1 addition & 0 deletions docs/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@

compute
objectives
notebooks/dev_guide/grid.ipynb

.. toctree::
:maxdepth: 1
Expand Down
1,367 changes: 1,367 additions & 0 deletions docs/notebooks/dev_guide/grid.ipynb

Large diffs are not rendered by default.

Loading