Skip to content

Commit

Permalink
Merge pull request #127 from mraspaud/refactor-calibration
Browse files Browse the repository at this point in the history
Extract calibration modules from the reader
  • Loading branch information
mraspaud authored Aug 30, 2024
2 parents 60f1849 + 38d4bb5 commit d95d657
Show file tree
Hide file tree
Showing 34 changed files with 1,094 additions and 855 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ fail_fast: false
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: 'v0.3.2'
rev: 'v0.6.2'
hooks:
- id: ruff
args: [--fix]
Expand Down
1 change: 1 addition & 0 deletions continuous_integration/environment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ dependencies:
- packaging
- pytest
- pytest-cov
- xarray
- pip
- pip:
- docutils
Expand Down
35 changes: 17 additions & 18 deletions doc/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
Expand All @@ -25,33 +24,33 @@

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.inheritance_diagram']
extensions = ["sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.todo",
"sphinx.ext.inheritance_diagram"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
master_doc = "index"

# General information about the project.
project = u'pygac'
copyright = u'2014, Abhay Devasthale, Martin Raspaud and Adam Dybbroe'
project = u"pygac"
copyright = u"2014, Abhay Devasthale, Martin Raspaud and Adam Dybbroe"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = '0.1'
release = "0.1"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
Expand Down Expand Up @@ -82,7 +81,7 @@
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
Expand All @@ -92,7 +91,7 @@

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme = "default"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
Expand Down Expand Up @@ -121,7 +120,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
Expand Down Expand Up @@ -165,7 +164,7 @@
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pygacdoc'
htmlhelp_basename = "pygacdoc"


# -- Options for LaTeX output --------------------------------------------------
Expand All @@ -179,8 +178,8 @@
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pygac.tex', u'pygac Documentation',
u'Abhay Devasthale, Martin Raspaud and Adam Dybbroe', 'manual'),
("index", "pygac.tex", u"pygac Documentation",
u"Abhay Devasthale, Martin Raspaud and Adam Dybbroe", "manual"),
]

# The name of an image file (relative to this directory) to place at the top of
Expand Down Expand Up @@ -212,6 +211,6 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pygac', u'pygac Documentation',
[u'Abhay Devasthale, Martin Raspaud and Adam Dybbroe'], 1)
("index", "pygac", u"pygac Documentation",
[u"Abhay Devasthale, Martin Raspaud and Adam Dybbroe"], 1)
]
3 changes: 2 additions & 1 deletion pygac/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,11 @@
"install').")

import logging

from pygac.configuration import get_config, read_config_file # noqa
from pygac.runner import get_reader_class, process_file # noqa

# add a NullHandler to prevent messages in sys.stderr if the using application does
# not use logging, but pygac makes logging calls of severity WARNING and greater.
# See https://docs.python.org/3/howto/logging.html (Configuring Logging for a Library)
logging.getLogger('pygac').addHandler(logging.NullHandler())
logging.getLogger("pygac").addHandler(logging.NullHandler())
143 changes: 105 additions & 38 deletions pygac/calibration.py → pygac/calibration/noaa.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,78 @@
LOG = logging.getLogger(__name__)


def calibrate(ds, custom_coeffs=None, coeffs_file=None):
    """Calibrate the dataset `ds`, possibly using custom_coeffs or a coeff file.

    Args:
        ds: xarray Dataset object containing the data to calibrate. Expected
            variables/attrs: "channels", "scan_line_index", "prt_counts",
            "ict_counts", "space_counts", the "times" coordinate, and the
            "spacecraft_name" and "sun_earth_distance_correction_factor" attrs.
        custom_coeffs: dictionary with a subset of user defined satellite specific
            calibration coefficients
        coeffs_file: path to json file containing default calibrations

    Returns:
        A copy of `ds` with the "channels" data calibrated and a
        "calib_coeffs_version" attribute added.
    """
    # Raw count arrays and per-scanline metadata needed for calibration.
    channels = ds["channels"].data
    times = ds.coords["times"]
    scan_line_numbers = ds["scan_line_index"].data

    # Coefficient set for this platform; custom coefficients or an alternative
    # coefficient file override the packaged defaults.
    calibration_coeffs = Calibrator(
        ds.attrs["spacecraft_name"],
        custom_coeffs=custom_coeffs,
        coeffs_file=coeffs_file
    )

    # `times` is in nanosecond resolution. However, conversion from datetime64[ns] to datetime objects
    # does not work, and anyway only the date is needed.
    start_time = times[0].dt
    year = start_time.year.item()
    jday = start_time.dayofyear.item()


    corr = ds.attrs["sun_earth_distance_correction_factor"]

    # How many reflective (solar) channels are there? The last three entries
    # of the channel axis are thermal, everything before them is reflective.
    tot_ref = channels.shape[2] - 3

    # Calibrate the solar channels in place.
    channels[:, :, 0:tot_ref] = calibrate_solar(
        channels[:, :, 0:tot_ref],
        np.arange(tot_ref),
        year, jday,
        calibration_coeffs,
        corr
    )

    # Telemetry counts required for the thermal calibration.
    prt = ds["prt_counts"].data
    ict = ds["ict_counts"].data
    space = ds["space_counts"].data

    # AVHRR channels 3b, 4 and 5; `chan - 6` maps 3, 4, 5 to the negative
    # indices -3, -2, -1, i.e. the last three entries of the channel axis.
    ir_channels_to_calibrate = [3, 4, 5]

    for chan in ir_channels_to_calibrate:
        channels[:, :, chan - 6] = calibrate_thermal(
            channels[:, :, chan - 6],
            prt,
            ict[:, chan - 3],
            space[:, chan - 3],
            scan_line_numbers,
            chan,
            calibration_coeffs
        )

    # NOTE(review): `channels` aliases the dataset's underlying array and was
    # modified in place above, and `ds.copy()` is shallow by default — so the
    # input `ds` is likely mutated as well as the returned copy. Confirm this
    # is intended.
    new_ds = ds.copy()
    new_ds["channels"].data = channels

    new_ds.attrs["calib_coeffs_version"] = calibration_coeffs.version

    return new_ds


class CoeffStatus(Enum):
"""Indicates the status of calibration coefficients."""
NOMINAL = 'nominal'
PROVISIONAL = 'provisional'
EXPERIMENTAL = 'experimental'
NOMINAL = "nominal"
PROVISIONAL = "provisional"
EXPERIMENTAL = "experimental"


class Calibrator(object):
class Calibrator:
"""Factory class to create namedtuples holding the calibration coefficients.
Attributes:
Expand All @@ -56,17 +120,17 @@ class Calibrator(object):
default_coeffs: dictionary containing default values for all spacecraft
"""
version_hashs = {
'963af9b66268475ed500ad7b37da33c5': {
'name': 'PATMOS-x, v2017r1',
'status': CoeffStatus.NOMINAL
"963af9b66268475ed500ad7b37da33c5": {
"name": "PATMOS-x, v2017r1",
"status": CoeffStatus.NOMINAL
},
'689386c822de18a07194ac7fd71652ea': {
'name': 'PATMOS-x, v2017r1, with provisional coefficients for MetOp-C',
'status': CoeffStatus.PROVISIONAL
"689386c822de18a07194ac7fd71652ea": {
"name": "PATMOS-x, v2017r1, with provisional coefficients for MetOp-C",
"status": CoeffStatus.PROVISIONAL
},
'e8735ec394ecdb87b7edcd261e72d2eb': {
'name': 'PATMOS-x, v2023',
'status': CoeffStatus.PROVISIONAL
"e8735ec394ecdb87b7edcd261e72d2eb": {
"name": "PATMOS-x, v2023",
"status": CoeffStatus.PROVISIONAL
},
}
fields = [
Expand All @@ -75,7 +139,7 @@ class Calibrator(object):
"to_eff_blackbody_slope", "date_of_launch", "d", "spacecraft", "version"
]

Calibrator = namedtuple('Calibrator', fields)
Calibrator = namedtuple("Calibrator", fields)
default_coeffs = None
default_file = None
default_version = None
Expand Down Expand Up @@ -112,21 +176,21 @@ def __new__(cls, spacecraft, custom_coeffs=None, coeffs_file=None):
for key in ("dark_count", "gain_switch", "s0", "s1", "s2"):
arraycoeffs[key] = np.array([
coeffs[channel][key]
for channel in ('channel_1', 'channel_2', 'channel_3a')
for channel in ("channel_1", "channel_2", "channel_3a")
], dtype=float)
# thermal channels
for key in ("centroid_wavenumber", "space_radiance",
"to_eff_blackbody_intercept", "to_eff_blackbody_slope"):
arraycoeffs[key] = np.array([
coeffs[channel][key]
for channel in ('channel_3b', 'channel_4', 'channel_5')
for channel in ("channel_3b", "channel_4", "channel_5")
], dtype=float)
arraycoeffs["b"] = np.array([
[
coeffs[channel][key]
for key in ("b0", "b1", "b2")
]
for channel in ('channel_3b', 'channel_4', 'channel_5')
for channel in ("channel_3b", "channel_4", "channel_5")
], dtype=float)
# thermometers
# Note, that "thermometer_0" does not exists, and is filled with zeros to
Expand All @@ -139,7 +203,7 @@ def __new__(cls, spacecraft, custom_coeffs=None, coeffs_file=None):
for d in range(5)
], dtype=float)
# parse date of launch
date_of_launch_str = coeffs["date_of_launch"].replace('Z', '+00:00')
date_of_launch_str = coeffs["date_of_launch"].replace("Z", "+00:00")
if sys.version_info < (3, 7):
# Note that here any time information is lost
import dateutil.parser
Expand Down Expand Up @@ -174,7 +238,7 @@ def read_coeffs(cls, coeffs_file):
else:
LOG.debug("Read PyGAC internal calibration coefficients.")
coeffs_file = files("pygac") / "data/calibration.json"
with open(coeffs_file, mode='rb') as json_file:
with open(coeffs_file, mode="rb") as json_file:
content = json_file.read()
coeffs = json.loads(content)
version = cls._get_coeffs_version(content)
Expand All @@ -187,10 +251,10 @@ def _get_coeffs_version(cls, coeff_file_content):
digest = md5_hash.hexdigest()
version_dict = cls.version_hashs.get(
digest,
{'name': None, 'status': None}
{"name": None, "status": None}
)
version = version_dict['name']
status = version_dict['status']
version = version_dict["name"]
status = version_dict["status"]
if version is None:
warning = "Unknown calibration coefficients version!"
warnings.warn(warning, RuntimeWarning)
Expand All @@ -199,7 +263,7 @@ def _get_coeffs_version(cls, coeff_file_content):
LOG.info('Identified calibration coefficients version "%s".',
version)
if status != CoeffStatus.NOMINAL:
warning = 'Using {} calibration coefficients'.format(status)
warning = "Using {} calibration coefficients".format(status)
warnings.warn(warning, RuntimeWarning)
LOG.warning(warning)
return version
Expand Down Expand Up @@ -393,14 +457,15 @@ def calibrate_thermal(counts, prt, ict, space, line_numbers, channel, cal):
# Note that the prt values are the average value of the three readings from one of the four
# PRTs. See reader.get_telemetry implementations.
prt_threshold = 50 # empirically found and set by Abhay Devasthale
offset = 0

for i, prt_val in enumerate(prt):
for offset in range(5):
# According to the KLM Guide the fill value between PRT measurments is 0, but we search
# for the first measurment gap using the threshold. Is this on purpose?
if prt_val < prt_threshold:
offset = i
# for the first measurement gap using the threshold, because the fill value is in practice
# not always exactly 0.
if np.median(prt[(line_numbers - line_numbers[0]) % 5 == offset]) < prt_threshold:
break
else:
raise IndexError("No PRT 0-index found!")

# get the PRT index, iprt equals to 0 corresponds to the measurement gaps
iprt = (line_numbers - line_numbers[0] + 5 - offset) % 5
Expand Down Expand Up @@ -450,16 +515,18 @@ def calibrate_thermal(counts, prt, ict, space, line_numbers, channel, cal):
if channel == 3:
zeros = ict < ict_threshold
nonzeros = np.logical_not(zeros)

ict[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
ict[nonzeros])
try:
ict[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
ict[nonzeros])
except ValueError: # 3b has no valid data
return counts
zeros = space < space_threshold
nonzeros = np.logical_not(zeros)

space[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
space[nonzeros])
(nonzeros).nonzero()[0],
space[nonzeros])

# convolving and smoothing PRT, ICT and SPACE values
if lines > 51:
Expand All @@ -468,9 +535,9 @@ def calibrate_thermal(counts, prt, ict, space, line_numbers, channel, cal):
wlength = 3

weighting_function = np.ones(wlength, dtype=float) / wlength
tprt_convolved = np.convolve(tprt, weighting_function, 'same')
ict_convolved = np.convolve(ict, weighting_function, 'same')
space_convolved = np.convolve(space, weighting_function, 'same')
tprt_convolved = np.convolve(tprt, weighting_function, "same")
ict_convolved = np.convolve(ict, weighting_function, "same")
space_convolved = np.convolve(space, weighting_function, "same")

# take care of the beginning and end
tprt_convolved[0:(wlength - 1) // 2] = tprt_convolved[(wlength - 1) // 2]
Expand All @@ -491,7 +558,7 @@ def calibrate_thermal(counts, prt, ict, space, line_numbers, channel, cal):
# TsBB = A + B*TBB (7.1.2.4-3)
# NBB = c1*nu_e^3/(exp(c2*nu_e/TsBB) - 1) (7.1.2.4-3)
# where the constants of the Planck function are defined as c1 = 2*h*c^2, c2 = h*c/k_B.
# constatns
# constants
c1 = 1.1910427e-5 # mW/m^2/sr/cm^{-4}
c2 = 1.4387752 # cm K
# coefficients
Expand Down
Loading

0 comments on commit d95d657

Please sign in to comment.