diff --git a/botorch/acquisition/analytic.py b/botorch/acquisition/analytic.py
index eb8e0ac026..265677d8bf 100644
--- a/botorch/acquisition/analytic.py
+++ b/botorch/acquisition/analytic.py
@@ -1116,7 +1116,7 @@ def _get_noiseless_fantasy_model(
     # Not transforming Yvar because 1e-7 is already close to 0 and it is a
     # relative, not absolute, value.
     Y_fantasized, _ = outcome_transform(
-        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1)
+        Y_fantasized.unsqueeze(-1), Yvar.unsqueeze(-1), X=batch_X_observed
     )
     Y_fantasized = Y_fantasized.squeeze(-1)
     input_transform = getattr(model, "input_transform", None)
diff --git a/botorch/models/approximate_gp.py b/botorch/models/approximate_gp.py
index c934196d1b..bc6021b660 100644
--- a/botorch/models/approximate_gp.py
+++ b/botorch/models/approximate_gp.py
@@ -172,7 +172,7 @@ def posterior(
         posterior = GPyTorchPosterior(distribution=dist)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             posterior = posterior_transform(posterior)
         return posterior
@@ -397,7 +397,7 @@ def __init__(
                 UserInputWarning,
                 stacklevel=3,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         validate_input_scaling(train_X=transformed_X, train_Y=train_Y)
         if train_Y.shape[-1] != num_outputs:
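Note: the pattern throughout this patch is that the outcome transform now receives the inputs alongside the targets, and most constructor call sites pass the *input-transformed* `transformed_X`. A minimal sketch of that ordering, assuming a BoTorch version that includes this change (`Normalize` and `Standardize` are the stock BoTorch transforms; `Standardize` simply ignores `X`):

```python
import torch

from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize

train_X = 10 * torch.rand(20, 3)  # raw inputs on an arbitrary scale
train_Y = torch.randn(20, 1)

intf = Normalize(d=3)
transformed_X = intf(train_X)  # the input transform is applied first ...
octf = Standardize(m=1)
# ... and the outcome transform sees the model-space inputs.
train_Y_tf, _ = octf(train_Y, X=transformed_X)
```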
if hasattr(self, "outcome_transform"): - values, _ = self.outcome_transform.untransform(values) + values, _ = self.outcome_transform.untransform(values, X=X) if output_indices is not None: values = values[..., output_indices] posterior = EnsemblePosterior(values=values) diff --git a/botorch/models/fully_bayesian.py b/botorch/models/fully_bayesian.py index 8b88423634..dbe4740d05 100644 --- a/botorch/models/fully_bayesian.py +++ b/botorch/models/fully_bayesian.py @@ -373,7 +373,9 @@ def __init__( X=train_X, input_transform=input_transform ) if outcome_transform is not None: - train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) + train_Y, train_Yvar = outcome_transform( + Y=train_Y, Yvar=train_Yvar, X=transformed_X + ) self._validate_tensor_args(X=transformed_X, Y=train_Y) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar diff --git a/botorch/models/fully_bayesian_multitask.py b/botorch/models/fully_bayesian_multitask.py index 500509e330..0fd358e427 100644 --- a/botorch/models/fully_bayesian_multitask.py +++ b/botorch/models/fully_bayesian_multitask.py @@ -242,7 +242,9 @@ def __init__( ) if outcome_transform is not None: outcome_transform.train() # Ensure we learn parameters here on init - train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) + train_Y, train_Yvar = outcome_transform( + Y=train_Y, Yvar=train_Yvar, X=transformed_X + ) if train_Yvar is not None: # Clamp after transforming train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL) diff --git a/botorch/models/gp_regression.py b/botorch/models/gp_regression.py index 1e8ba53c3a..af8a5cc6ee 100644 --- a/botorch/models/gp_regression.py +++ b/botorch/models/gp_regression.py @@ -160,7 +160,9 @@ def __init__( X=train_X, input_transform=input_transform ) if outcome_transform is not None: - train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) + train_Y, train_Yvar = outcome_transform( + Y=train_Y, Yvar=train_Yvar, X=transformed_X + ) # Validate again after applying the transforms self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar) ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None) diff --git a/botorch/models/gpytorch.py b/botorch/models/gpytorch.py index 82d4cbd580..39052e600b 100644 --- a/botorch/models/gpytorch.py +++ b/botorch/models/gpytorch.py @@ -198,7 +198,7 @@ def posterior( mvn = self.likelihood(mvn, X) posterior = GPyTorchPosterior(distribution=mvn) if hasattr(self, "outcome_transform"): - posterior = self.outcome_transform.untransform_posterior(posterior) + posterior = self.outcome_transform.untransform_posterior(posterior, X=X) if posterior_transform is not None: return posterior_transform(posterior) return posterior @@ -244,7 +244,7 @@ def condition_on_observations( # (unless we've already trasnformed if BatchedMultiOutputGPyTorchModel) if not isinstance(self, BatchedMultiOutputGPyTorchModel): # `noise` is assumed to already be outcome-transformed. - Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar) + Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar, X=X) # Validate using strict=False, since we cannot tell if Y has an explicit # output dimension. Do not check shapes when fantasizing as they are # not expected to match. 
diff --git a/botorch/models/gpytorch.py b/botorch/models/gpytorch.py
index 82d4cbd580..39052e600b 100644
--- a/botorch/models/gpytorch.py
+++ b/botorch/models/gpytorch.py
@@ -198,7 +198,7 @@ def posterior(
                 mvn = self.likelihood(mvn, X)
         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
@@ -244,7 +244,7 @@ def condition_on_observations(
             # (unless we've already transformed if BatchedMultiOutputGPyTorchModel)
             if not isinstance(self, BatchedMultiOutputGPyTorchModel):
                 # `noise` is assumed to already be outcome-transformed.
-                Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar)
+                Y, _ = self.outcome_transform(Y=Y, Yvar=Yvar, X=X)
         # Validate using strict=False, since we cannot tell if Y has an explicit
         # output dimension. Do not check shapes when fantasizing as they are
         # not expected to match.
@@ -467,7 +467,7 @@ def posterior(
         posterior = GPyTorchPosterior(distribution=mvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
@@ -511,7 +511,7 @@ def condition_on_observations(
         if hasattr(self, "outcome_transform"):
             # We need to apply transforms before shifting batch indices around.
             # `noise` is assumed to already be outcome-transformed.
-            Y, _ = self.outcome_transform(Y)
+            Y, _ = self.outcome_transform(Y, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
@@ -924,7 +924,7 @@ def posterior(
             )
             posterior = GPyTorchPosterior(distribution=mtmvn)
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         if posterior_transform is not None:
             return posterior_transform(posterior)
         return posterior
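End-to-end, nothing changes for callers: `posterior` forwards its test inputs to `untransform_posterior` internally. A quick sketch, assuming a BoTorch build with this patch applied:

```python
import torch

from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize

train_X = torch.rand(10, 2)
train_Y = torch.randn(10, 1)
model = SingleTaskGP(train_X, train_Y, outcome_transform=Standardize(m=1))

test_X = torch.rand(5, 2)
# `X=test_X` now reaches `untransform_posterior` behind the scenes.
posterior = model.posterior(test_X)
```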
diff --git a/botorch/models/higher_order_gp.py b/botorch/models/higher_order_gp.py
index 27bb54134d..faa4eccc11 100644
--- a/botorch/models/higher_order_gp.py
+++ b/botorch/models/higher_order_gp.py
@@ -91,7 +91,7 @@ def _return_to_output_shape(self, tsr: Tensor) -> Tensor:
         return out

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:
@@ -107,7 +107,7 @@ def forward(
         return Y_out, Yvar_out

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         Y = self._squeeze_to_single_output(Y)
         if Yvar is not None:
@@ -121,7 +121,7 @@ def untransform(
         return Y, Yvar

     def untransform_posterior(
-        self, posterior: HigherOrderGPPosterior
+        self, posterior: HigherOrderGPPosterior, X: Tensor | None = None
     ) -> TransformedPosterior:
         # TODO: return a HigherOrderGPPosterior once rescaling constant
         # muls * LinearOperators won't force a dense decomposition rather than a
@@ -227,7 +227,7 @@ def __init__(
                 output_shape=train_Y.shape[-num_output_dims:],
                 batch_shape=batch_shape,
             )
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=train_X)

         self._aug_batch_shape = batch_shape
         self._num_dimensions = num_output_dims + 1
@@ -416,7 +416,7 @@ def condition_on_observations(
         """
         if hasattr(self, "outcome_transform"):
             # we need to apply transforms before shifting batch indices around
-            Y, noise = self.outcome_transform(Y=Y, Yvar=noise)
+            Y, noise = self.outcome_transform(Y=Y, Yvar=noise, X=X)
         # Do not check shapes when fantasizing as they are not expected to match.
         if fantasize_flag.off():
             self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
@@ -540,7 +540,7 @@ def posterior(
             num_outputs=self._num_outputs,
         )
         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior

     def make_posterior_variances(
diff --git a/botorch/models/latent_kronecker_gp.py b/botorch/models/latent_kronecker_gp.py
index a205c9c0fe..515c3db83c 100644
--- a/botorch/models/latent_kronecker_gp.py
+++ b/botorch/models/latent_kronecker_gp.py
@@ -81,7 +81,7 @@ def __init__(
         self._use_min = use_min

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Standardize outcomes.
@@ -93,6 +93,7 @@
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).

         Returns:
             A two-tuple with the transformed outcomes:
@@ -240,7 +241,9 @@ def __init__(
                 outcome_transform = MinMaxStandardize(batch_shape=batch_shape)
             if outcome_transform is not None:
                 # transform outputs once and keep the results
-                train_Y = outcome_transform(train_Y.unsqueeze(-1))[0].squeeze(-1)
+                train_Y = outcome_transform(train_Y.unsqueeze(-1), X=transformed_X)[
+                    0
+                ].squeeze(-1)

         ExactGP.__init__(
             self,
@@ -506,7 +509,7 @@ def _rsample_from_base_samples(
         )
         # samples.shape = (*sample_shape, *broadcast_shape, n_test_x, n_t)
         if hasattr(self, "outcome_transform") and self.outcome_transform is not None:
-            samples, _ = self.outcome_transform.untransform(samples)
+            samples, _ = self.outcome_transform.untransform(samples, X=X)
         return samples

     def condition_on_observations(
diff --git a/botorch/models/model_list_gp_regression.py b/botorch/models/model_list_gp_regression.py
index 626dd2c279..e446508b28 100644
--- a/botorch/models/model_list_gp_regression.py
+++ b/botorch/models/model_list_gp_regression.py
@@ -117,7 +117,7 @@ def condition_on_observations(
             else:
                 noise_i = torch.cat([noise[..., k] for k in range(i, j)], dim=-1)
             if hasattr(model, "outcome_transform"):
-                y_i, noise_i = model.outcome_transform(y_i, noise_i)
+                y_i, noise_i = model.outcome_transform(y_i, noise_i, X=X_i)
             if noise_i is not None:
                 noise_i = noise_i.squeeze(0)
             targets.append(y_i)
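The conditioning/fantasizing paths get the same treatment: the inputs of the new observations are forwarded to the outcome transform. A sketch, assuming this patch is applied; the preceding `posterior` call populates GPyTorch's prediction caches, which `condition_on_observations` requires:

```python
import torch

from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Standardize

model = SingleTaskGP(
    torch.rand(10, 2), torch.randn(10, 1), outcome_transform=Standardize(m=1)
)

new_X = torch.rand(3, 2)
new_Y = torch.randn(3, 1)  # raw outcomes; the model transforms them internally
model.posterior(new_X)  # populate prediction caches before conditioning
fantasy_model = model.condition_on_observations(new_X, new_Y)
```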
diff --git a/botorch/models/multitask.py b/botorch/models/multitask.py
index 12cbc2fa87..eaf0fa41a5 100644
--- a/botorch/models/multitask.py
+++ b/botorch/models/multitask.py
@@ -219,7 +219,9 @@ def __init__(
         if outcome_transform == DEFAULT:
             outcome_transform = Standardize(m=1, batch_shape=train_X.shape[:-2])
         if outcome_transform is not None:
-            train_Y, train_Yvar = outcome_transform(Y=train_Y, Yvar=train_Yvar)
+            train_Y, train_Yvar = outcome_transform(
+                Y=train_Y, Yvar=train_Yvar, X=transformed_X
+            )

         # squeeze output dim
         train_Y = train_Y.squeeze(-1)
@@ -464,7 +466,7 @@ def __init__(
             X=train_X, input_transform=input_transform
         )
         if outcome_transform is not None:
-            train_Y, _ = outcome_transform(train_Y)
+            train_Y, _ = outcome_transform(train_Y, X=transformed_X)
         self._validate_tensor_args(X=transformed_X, Y=train_Y)
         self._num_outputs = train_Y.shape[-1]
@@ -772,7 +774,7 @@ def posterior(
         )

         if hasattr(self, "outcome_transform"):
-            posterior = self.outcome_transform.untransform_posterior(posterior)
+            posterior = self.outcome_transform.untransform_posterior(posterior, X=X)
         return posterior

     def train(self, val=True, *args, **kwargs):
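The interface change underlying all of the call-site updates above lives in `botorch/models/transforms/outcome.py`, which follows: every `OutcomeTransform` method now accepts an optional `X`. As an illustration, here is a toy X-aware transform conforming to the new signature (this class is hypothetical, not part of BoTorch):

```python
from torch import Tensor

from botorch.models.transforms.outcome import OutcomeTransform


class SubtractFirstFeature(OutcomeTransform):
    """Toy transform that shifts Y by the first input feature."""

    def forward(
        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
    ) -> tuple[Tensor, Tensor | None]:
        if X is None:
            raise ValueError("This transform requires X.")
        return Y - X[..., :1], Yvar  # a pure shift leaves Yvar unchanged

    def untransform(
        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
    ) -> tuple[Tensor, Tensor | None]:
        if X is None:
            raise ValueError("This transform requires X.")
        return Y + X[..., :1], Yvar
```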
""" for tf in self.values(): - Y, Yvar = tf.forward(Y, Yvar) + Y, Yvar = tf.forward(Y=Y, Yvar=Yvar, X=X) return Y, Yvar def subset_output(self, idcs: list[int]) -> OutcomeTransform: @@ -172,7 +178,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform: ) def untransform( - self, Y: Tensor, Yvar: Tensor | None = None + self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None ) -> tuple[Tensor, Tensor | None]: r"""Un-transform previously transformed outcomes @@ -180,6 +186,7 @@ def untransform( Y: A `batch_shape x n x m`-dim tensor of transfomred training targets. Yvar: A `batch_shape x n x m`-dim tensor of transformed observation noises associated with the training targets (if applicable). + X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable). Returns: A two-tuple with the un-transformed outcomes: @@ -188,7 +195,7 @@ def untransform( - The un-transformed observation noise (if applicable). """ for tf in reversed(self.values()): - Y, Yvar = tf.untransform(Y, Yvar) + Y, Yvar = tf.untransform(Y=Y, Yvar=Yvar, X=X) return Y, Yvar @property @@ -199,17 +206,20 @@ def _is_linear(self) -> bool: """ return all(octf._is_linear for octf in self.values()) - def untransform_posterior(self, posterior: Posterior) -> Posterior: + def untransform_posterior( + self, posterior: Posterior, X: Tensor | None = None + ) -> Posterior: r"""Un-transform a posterior Args: posterior: A posterior in the transformed space. + X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable). Returns: The un-transformed posterior. """ for tf in reversed(self.values()): - posterior = tf.untransform_posterior(posterior) + posterior = tf.untransform_posterior(posterior, X=X) return posterior @@ -250,7 +260,7 @@ def __init__( self._min_stdv = min_stdv def forward( - self, Y: Tensor, Yvar: Tensor | None = None + self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None ) -> tuple[Tensor, Tensor | None]: r"""Standardize outcomes. @@ -262,6 +272,9 @@ def forward( Y: A `batch_shape x n x m`-dim tensor of training targets. Yvar: A `batch_shape x n x m`-dim tensor of observation noises associated with the training targets (if applicable). + X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable). + This argument is not used by this transform, but it is used by + its subclass, `StratifiedStandardize`. Returns: A two-tuple with the transformed outcomes: @@ -339,7 +352,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform: return new_tf def untransform( - self, Y: Tensor, Yvar: Tensor | None = None + self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None ) -> tuple[Tensor, Tensor | None]: r"""Un-standardize outcomes. @@ -347,6 +360,9 @@ def untransform( Y: A `batch_shape x n x m`-dim tensor of standardized targets. Yvar: A `batch_shape x n x m`-dim tensor of standardized observation noises associated with the targets (if applicable). + X: A `batch_shape x n x d`-dim tensor of inputs (if applicable). + This argument is not used by this transform, but it is used by + its subclass, `StratifiedStandardize`. Returns: A two-tuple with the un-standardized outcomes: @@ -370,12 +386,15 @@ def _is_linear(self) -> bool: return True def untransform_posterior( - self, posterior: Posterior + self, posterior: Posterior, X: Tensor | None = None ) -> GPyTorchPosterior | TransformedPosterior: r"""Un-standardize the posterior. Args: posterior: A posterior in the standardized space. + X: A `batch_shape x n x d`-dim tensor of inputs (if applicable). 
@@ -250,7 +260,7 @@ def __init__(
         self._min_stdv = min_stdv

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Standardize outcomes.

@@ -262,6 +272,9 @@
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).
+                This argument is not used by this transform, but it is used by
+                its subclass, `StratifiedStandardize`.

         Returns:
             A two-tuple with the transformed outcomes:
@@ -339,7 +352,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform:
         return new_tf

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Un-standardize outcomes.

@@ -347,6 +360,9 @@
             Y: A `batch_shape x n x m`-dim tensor of standardized targets.
             Yvar: A `batch_shape x n x m`-dim tensor of standardized observation
                 noises associated with the targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform, but it is used by
+                its subclass, `StratifiedStandardize`.

         Returns:
             A two-tuple with the un-standardized outcomes:
@@ -370,12 +386,15 @@ def _is_linear(self) -> bool:
         return True

     def untransform_posterior(
-        self, posterior: Posterior
+        self, posterior: Posterior, X: Tensor | None = None
     ) -> GPyTorchPosterior | TransformedPosterior:
         r"""Un-standardize the posterior.

         Args:
             posterior: A posterior in the standardized space.
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform, but it is used by
+                its subclass, `StratifiedStandardize`.

         Returns:
             The un-standardized posterior. If the input posterior is a
@@ -487,7 +506,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform:
         return new_tf

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Log-transform outcomes.

@@ -495,6 +514,8 @@
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the transformed outcomes:
@@ -520,7 +541,7 @@
         return Y_tf, Yvar

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Un-transform log-transformed outcomes

@@ -529,6 +550,8 @@
             Yvar: A `batch_shape x n x m`-dim tensor of log-
                 transformed observation noises associated with the training
                 targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the un-transformed outcomes:
@@ -553,11 +576,15 @@
         )
         return Y_utf, Yvar

-    def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior:
+    def untransform_posterior(
+        self, posterior: Posterior, X: Tensor | None = None
+    ) -> TransformedPosterior:
         r"""Un-transform the log-transformed posterior.

         Args:
             posterior: A posterior in the log-transformed space.
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             The un-transformed posterior.
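Why thread `X` at all? The `Standardize` docstrings above point to `StratifiedStandardize`. A toy sketch of per-stratum standardization in that spirit (illustrative code, not the actual BoTorch class):

```python
import torch


def stratified_standardize(
    Y: torch.Tensor, X: torch.Tensor, stratification_idx: int = -1
) -> torch.Tensor:
    """Standardize Y separately for each discrete value in one column of X."""
    strata = X[..., stratification_idx]
    Y_tf = Y.clone()
    for s in strata.unique():
        mask = strata == s
        # biased std so single-element strata yield 0 rather than NaN
        std = Y[mask].std(unbiased=False).clamp_min(1e-8)
        Y_tf[mask] = (Y[mask] - Y[mask].mean()) / std
    return Y_tf


# e.g. a task-indexed input: the last column holds discrete task ids 0/1
X = torch.cat([torch.rand(10, 2), torch.randint(2, (10, 1)).float()], dim=-1)
Y = torch.randn(10, 1)
Y_tf = stratified_standardize(Y, X)
```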
@@ -616,7 +643,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform:
         return new_tf

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Power-transform outcomes.

@@ -624,6 +651,8 @@
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the transformed outcomes:
@@ -649,7 +678,7 @@
         return Y_tf, Yvar

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Un-transform power-transformed outcomes

@@ -658,6 +687,8 @@
             Yvar: A `batch_shape x n x m`-dim tensor of power-transformed
                 observation noises associated with the training targets
                 (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the un-transformed outcomes:
@@ -682,11 +713,15 @@
         )
         return Y_utf, Yvar

-    def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior:
+    def untransform_posterior(
+        self, posterior: Posterior, X: Tensor | None = None
+    ) -> TransformedPosterior:
         r"""Un-transform the power-transformed posterior.

         Args:
             posterior: A posterior in the power-transformed space.
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             The un-transformed posterior.
@@ -741,7 +776,7 @@ def subset_output(self, idcs: list[int]) -> OutcomeTransform:
         return new_tf

     def forward(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Bilog-transform outcomes.

@@ -749,6 +784,8 @@
             Y: A `batch_shape x n x m`-dim tensor of training targets.
             Yvar: A `batch_shape x n x m`-dim tensor of observation noises
                 associated with the training targets (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of training inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the transformed outcomes:
@@ -773,7 +810,7 @@
         return Y_tf, Yvar

     def untransform(
-        self, Y: Tensor, Yvar: Tensor | None = None
+        self, Y: Tensor, Yvar: Tensor | None = None, X: Tensor | None = None
     ) -> tuple[Tensor, Tensor | None]:
         r"""Un-transform bilog-transformed outcomes

@@ -782,6 +819,8 @@
             Yvar: A `batch_shape x n x m`-dim tensor of bilog-transformed
                 observation noises associated with the training targets
                 (if applicable).
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             A two-tuple with the un-transformed outcomes:
@@ -806,11 +845,15 @@
         )
         return Y_utf, Yvar

-    def untransform_posterior(self, posterior: Posterior) -> TransformedPosterior:
+    def untransform_posterior(
+        self, posterior: Posterior, X: Tensor | None = None
+    ) -> TransformedPosterior:
         r"""Un-transform the bilog-transformed posterior.

         Args:
             posterior: A posterior in the bilog-transformed space.
+            X: A `batch_shape x n x d`-dim tensor of inputs (if applicable).
+                This argument is not used by this transform.

         Returns:
             The un-transformed posterior.
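For these non-linear transforms (`Log`, `Power`, `Bilog`), `X` is accepted purely for interface uniformity and ignored; `untransform_posterior` still yields a `TransformedPosterior`. A closing sketch, assuming this patch:

```python
import torch

from botorch.models import SingleTaskGP
from botorch.models.transforms.outcome import Bilog
from botorch.posteriors import TransformedPosterior

model = SingleTaskGP(
    torch.rand(8, 2), torch.randn(8, 1), outcome_transform=Bilog()
)
posterior = model.posterior(torch.rand(4, 2))
assert isinstance(posterior, TransformedPosterior)  # X was accepted but unused
```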