From 4dce4cf6ca80fb99c2d5fcde5a607d569e48180b Mon Sep 17 00:00:00 2001
From: Nic Rummel
Date: Tue, 5 Mar 2024 18:19:17 -0700
Subject: [PATCH 1/2] Implement the sketched least-squares boosting algorithm
 presented in https://arxiv.org/abs/2209.05705

---
 examples/plot_bf_boosting.py                 | 218 +++++++++++++++++++
 pyapprox/benchmarks/benchmarks.py            |   5 +-
 pyapprox/surrogates/approximate.py           | 137 +++++++++++-
 pyapprox/surrogates/polychaos/bf_boosting.py | 188 ++++++++++++++++
 4 files changed, 542 insertions(+), 6 deletions(-)
 create mode 100644 examples/plot_bf_boosting.py
 create mode 100644 pyapprox/surrogates/polychaos/bf_boosting.py

diff --git a/examples/plot_bf_boosting.py b/examples/plot_bf_boosting.py
new file mode 100644
index 00000000..9455338e
--- /dev/null
+++ b/examples/plot_bf_boosting.py
@@ -0,0 +1,218 @@
+#%%
+# imports
+import matplotlib.pyplot as plt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+from tqdm import tqdm
+import time
+# use reload for debugging
+# %load_ext autoreload
+# %autoreload 2
+from pyapprox.surrogates.approximate import adaptive_approximate, approximate
+from pyapprox.benchmarks import setup_benchmark
+from pyapprox.surrogates.polychaos.gpc import (
+    PolynomialChaosExpansion,
+    define_poly_options_from_variable_transformation)
+from pyapprox.surrogates.interp.indexing import compute_hyperbolic_indices
+from pyapprox.variables.transforms import AffineTransform
+from pyapprox.interface.wrappers import (
+    ModelEnsemble, WorkTrackingModel, TimerModel)
+#%%
+# Define the example
+no_samp = 1000
+max_degree = 4
+sketch_sz = 18
+no_trials = 100
+no_runs = 1000
+np.random.seed(1)
+# Set up the high- and low-fidelity models
+time_scenario = {
+    "final_time": 1.0,
+    "butcher_tableau": "im_crank2",
+    "deltat": 0.1,  # default, will be overwritten
+    "init_sol_fun": None,
+    "sink": None
+}
+config_values = [
+    [20, 10],
+    [10],
+    [0.125]
+]
+benchmark = setup_benchmark(
+    "multi_index_advection_diffusion",
+    kle_nvars=3, kle_length_scale=0.5,
+    time_scenario=time_scenario, config_values=config_values)
+variables = benchmark.variable
+# Define the samples used for every problem
+samples = variables.rvs(no_samp)
+# use the same set of indices for every method
+var_trans = AffineTransform(benchmark.variable)
+poly_opts = define_poly_options_from_variable_transformation(var_trans)
+indices = compute_hyperbolic_indices(var_trans.num_vars(), max_degree)
+# In practice we would evaluate the models through work-tracking wrappers
+funs = [WorkTrackingModel(
+    TimerModel(fun), base_model=fun) for fun in reversed(benchmark.funs)]
+model_ensemble = ModelEnsemble(funs, ["hi", "lo"])
+#%%
+# For the speed of this experiment, cheat: evaluate the right-hand sides of
+# the low- and high-fidelity models once up front and reuse them everywhere
+print("Eval lofi")
+start = time.time()
+b_lofi = model_ensemble.functions[1](samples)
+elapsed = time.time() - start
+print(f"  elapsed {elapsed} s")
+print("Eval hifi")
+start = time.time()
+b_hifi = model_ensemble.functions[0](samples)
+elapsed = time.time() - start
+print(f"  elapsed {elapsed} s")
+#%%
+# Define a cheat ensemble that returns the precomputed evaluations
+class CheatModelEnsemble(ModelEnsemble):
+    def __init__(self, arrays, names):
+        self.functions = arrays
+        self.names = names
+
+model_ensemble_cheat = CheatModelEnsemble([b_hifi, b_lofi], ["hi", "lo"])
+
+# helper to evaluate the relative l2 error of a fitted PCE,
+# ||pce(samples) - b_hifi||_2 / ||b_hifi||_2
+def getError(sketcher_or_booster):
+    Ax = sketcher_or_booster['approx'].value(samples)
+    return np.linalg.norm(Ax - b_hifi, 2) / np.linalg.norm(b_hifi, 2)
+#%%
+# Deterministic methods
+# full (unsketched) least-squares problem, for reference
+full_pce_res = approximate(
+    samples,
+    b_hifi,
+    'polynomial_chaos',
+    options={
+        'basis_type': 'fixed',
+        'variable': benchmark.variable,
+        'options': {
+            'indices': indices,
+            'solver_type': 'lstsq'
+        },
+        'poly_opts': poly_opts
+    }
+)
+# deterministic sketching algorithm, for reference
+qr_sketched_res = adaptive_approximate(
+    b_hifi,
+    benchmark.variable,
+    'polynomial_chaos',
+    options={
+        'method': 'sketched',
+        'options': {
+            'degree': max_degree,
+            'samples': samples,
+            'sketch_sz': sketch_sz,
+            'sketch_type': "qr"
+        }
+    }
+)
+# Get the errors of the deterministic methods
+# full linear least-squares problem error
+error_unsketched = getError(full_pce_res)
+# QR sketching is deterministic, so we need only do it once
+error_qr = getError(qr_sketched_res)
+#%%
+# Stochastic methods
+# The other sketching techniques sample from categorical distributions over
+# the rows, so we repeat them no_runs times
+error_unif = np.zeros(no_runs)
+error_lev = np.zeros(no_runs)
+error_unif_boosted = np.zeros(no_runs)
+error_lev_boosted = np.zeros(no_runs)
+for run_ix in tqdm(range(no_runs)):
+    # stochastic sketching algorithms
+    unif_sketched_res = adaptive_approximate(
+        b_hifi,
+        benchmark.variable,
+        'polynomial_chaos',
+        options={
+            'method': 'sketched',
+            'options': {
+                'degree': max_degree,
+                'samples': samples,
+                'sketch_sz': sketch_sz,
+                'sketch_type': "uniform"
+            }
+        }
+    )
+    lev_sketched_res = adaptive_approximate(
+        b_hifi,
+        benchmark.variable,
+        'polynomial_chaos',
+        options={
+            'method': 'sketched',
+            'options': {
+                'degree': max_degree,
+                'samples': samples,
+                'sketch_sz': sketch_sz,
+                'sketch_type': "leverage_score"
+            }
+        }
+    )
+    # bi-fidelity boosters (both are stochastic)
+    bifi_unif_sketched_res = adaptive_approximate(
+        model_ensemble_cheat,
+        benchmark.variable,
+        'polynomial_chaos',
+        options={
+            'method': 'bf_boosted',
+            'options': {
+                'degree': max_degree,
+                'samples': samples,
+                'no_trials': no_trials,
+                'sketch_sz': sketch_sz,
+                'sketch_type': "uniform"
+            }
+        }
+    )
+    bifi_lev_sketched_res = adaptive_approximate(
+        model_ensemble_cheat,
+        benchmark.variable,
+        'polynomial_chaos',
+        options={
+            'method': 'bf_boosted',
+            'options': {
+                'degree': max_degree,
+                'samples': samples,
+                'no_trials': no_trials,
+                'sketch_sz': sketch_sz,
+                'sketch_type': "leverage_score"
+            }
+        }
+    )
+    error_unif[run_ix] = getError(unif_sketched_res)
+    error_lev[run_ix] = getError(lev_sketched_res)
+    error_unif_boosted[run_ix] = getError(bifi_unif_sketched_res)
+    error_lev_boosted[run_ix] = getError(bifi_lev_sketched_res)
+
+# %%
+# plot
+err_name = np.concatenate(
+    [
+        np.repeat('Uniform', no_runs),
+        np.repeat('Leverage Score', no_runs),
+        np.repeat('Uniform', no_runs),
+        np.repeat('Leverage Score', no_runs)
+    ]
+)
+err_boosted = np.concatenate(
+    [
+        np.repeat('Regular', no_runs),
+        np.repeat('Regular', no_runs),
+        np.repeat('Boosted', no_runs),
+        np.repeat('Boosted', no_runs)
+    ]
+)
+err_val = np.concatenate(
+    [error_unif, error_lev, error_unif_boosted, error_lev_boosted])
+dic = {'Error': err_val, 'Sampling Method': err_name, 'boosted': err_boosted}
+df = pd.DataFrame(dic, index=np.arange(4*no_runs))
+
+fig, axes = plt.subplots(1, 1, figsize=(16, 8), constrained_layout=True)
+fig.suptitle('Advection-Diffusion Benchmark Comparison', fontsize=20)
+fg = sns.boxplot(y='Error', x='Sampling Method', hue='boosted', data=df,
+                 ax=axes)
+fg.axhline(error_qr, c='b', alpha=0.5, label="QR")
+fg.axhline(error_unsketched, c='y', alpha=0.5, label="Full Problem")
+fg.set_title(f'Max Degree = {max_degree}; Sketch Size = {sketch_sz}',
+             fontsize=14)
+fg.set_yscale('log')
+fg.legend(title=None, loc=1)
+# %%
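[Editor's note: if a numeric summary is wanted alongside the boxplot, the error samples collected above can be aggregated directly. A minimal sketch, assuming the `df` built in the script; not part of the patch:

    summary = df.groupby(['Sampling Method', 'boosted'])['Error'].agg(
        ['median', 'mean', 'std'])
    print(summary)
]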
diff --git a/pyapprox/benchmarks/benchmarks.py b/pyapprox/benchmarks/benchmarks.py
index d6b12ffc..8925da04 100644
--- a/pyapprox/benchmarks/benchmarks.py
+++ b/pyapprox/benchmarks/benchmarks.py
@@ -1091,8 +1091,9 @@ def setup_advection_diffusion_kle_inversion_benchmark(
         kle_length_scale, kle_stdev, kle_nvars, orders, obs_indices)
     # add wrapper to allow execution times to be captured
     timer_model = TimerModel(base_model, base_model)
-    pool_model = PoolModel(
-        timer_model, max_eval_concurrency, base_model=base_model)
+    pool_model = timer_model
+    # PoolModel(
+    #     timer_model, max_eval_concurrency, base_model=base_model)
     # add wrapper that tracks execution times.
     model = WorkTrackingModel(pool_model, base_model,
                               enforce_timer_model=False)
diff --git a/pyapprox/surrogates/approximate.py b/pyapprox/surrogates/approximate.py
index 4eb7cb1e..db83fa08 100644
--- a/pyapprox/surrogates/approximate.py
+++ b/pyapprox/surrogates/approximate.py
@@ -46,7 +46,7 @@
     PolynomialChaosExpansion, define_poly_options_from_variable_transformation
 )
 from pyapprox.surrogates.neural_networks import NeuralNetwork
-
+from pyapprox.surrogates.polychaos.bf_boosting import (
+    fit_pce_with_bf_boosting, fit_pce_with_sketch)

 class ApproximateResult(OptimizeResult):
     pass
@@ -267,7 +267,9 @@ def adaptive_approximate_sparse_grid(
 def adaptive_approximate_polynomial_chaos(
         fun, variable, method="leja", options={}):
     methods = {"leja": adaptive_approximate_polynomial_chaos_leja,
-               "induced": adaptive_approximate_polynomial_chaos_induced}
+               "induced": adaptive_approximate_polynomial_chaos_induced,
+               "sketched": adaptive_approximate_polynomial_chaos_sketched,
+               "bf_boosted": adaptive_approximate_polynomial_chaos_bf_boosted}
     # "random": adaptive_approximate_polynomial_chaos_random}

     if method not in methods:
@@ -457,6 +459,133 @@ def adaptive_approximate_polynomial_chaos_induced(
     pce.build(callback)
     return ApproximateResult({'approx': pce})

+def adaptive_approximate_polynomial_chaos_sketched(
+        fun, variables,
+        degree=None, samples=None, no_samp=None,
+        sketch_sz=None, sketch_type="leverage_score"):
+    r"""
+    Compute a polynomial chaos expansion of a function by solving a
+    sketched least-squares problem for the basis matrix and RHS.
+
+    Parameters
+    ----------
+    fun : callable
+        The function to be approximated
+
+        ``fun(z) -> np.ndarray``
+
+        where ``z`` is a 2D np.ndarray with shape (nvars, nsamples) and the
+        output is a 2D np.ndarray with shape (nsamples, nqoi)
+
+    variables : IndependentMarginalsVariable
+        A set of independent univariate random variables
+
+    degree : integer
+        Maximum degree of the polynomial basis
+
+    samples : np.ndarray
+        Samples at which the RHS and the PCE are evaluated
+
+    no_samp : integer
+        If samples is not provided, draw this many samples from variables
+
+    sketch_sz : integer
+        Number of rows in the sketch
+
+    sketch_type : string
+        The sketcher to use. The following are available:
+        "qr" (QRSketcher), "uniform" (UniformSketcher),
+        "leverage_score" (LeverageScoreSketcher)
+
+    Returns
+    -------
+    result : :class:`pyapprox.surrogates.approximate.ApproximateResult`
+        Result object with the following attributes
+
+    approx : :class:`pyapprox.surrogates.polychaos.gpc.PolynomialChaosExpansion`
+        The PCE approximation
+    """
+    assert degree is not None, \
+        "Must set the maximum degree of the polynomial basis"
+    assert samples is not None or no_samp is not None, \
+        "Must provide samples or the number of samples"
+    assert sketch_sz is not None, "Must set the number of rows to be sketched"
+
+    var_trans = AffineTransform(variables)
+    poly_opts = define_poly_options_from_variable_transformation(var_trans)
+    if samples is None:
+        samples = variables.rvs(no_samp)
+    pce = PolynomialChaosExpansion()
+    pce.configure(poly_opts)
+    # TODO: extend to other types of indices
+    indices = compute_hyperbolic_indices(pce.num_vars(), degree)
+    pce.set_indices(indices)
+    pce.basis_matrix(samples)
+    pce = fit_pce_with_sketch(pce, fun, samples, sketch_sz, sketch_type)
+
+    return ApproximateResult({'approx': pce})
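[Editor's note: for orientation, this is how the new 'sketched' path is reached through adaptive_approximate. A sketch mirroring examples/plot_bf_boosting.py above; the variable names come from that script:

    res = adaptive_approximate(
        b_hifi, benchmark.variable, 'polynomial_chaos',
        options={'method': 'sketched',
                 'options': {'degree': max_degree, 'samples': samples,
                             'sketch_sz': sketch_sz,
                             'sketch_type': 'leverage_score'}})
    pce = res['approx']  # fitted PolynomialChaosExpansion
]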
+
+def adaptive_approximate_polynomial_chaos_bf_boosted(
+        model_ensemble, variables,
+        degree=None, samples=None, no_samp=None,
+        sketch_sz=None, no_trials=None,
+        sketch_type="leverage_score"):
+    r"""
+    Compute a polynomial chaos expansion of a function using the algorithm
+    presented in https://arxiv.org/abs/2209.05705 for the setting where both
+    low- and high-fidelity models are present but the high-fidelity data is
+    expensive to compute. Because the two models are correlated, a row sketch
+    that is near optimal for the low-fidelity model is, with probabilistic
+    guarantees, also near optimal for the high-fidelity model.
+
+    Parameters
+    ----------
+    model_ensemble : ModelEnsemble
+        An instance of the ModelEnsemble class in pyapprox.interface.wrappers
+        with models named 'hi' and 'lo' corresponding to the high- and
+        low-fidelity models, respectively
+
+        ``model_ensemble.evaluate_at_separated_samples(z, 'lo') -> np.ndarray``
+        ``model_ensemble.evaluate_at_separated_samples(z, 'hi') -> np.ndarray``
+
+        where ``z`` is a 2D np.ndarray with shape (nvars, nsamples) and the
+        output is a 2D np.ndarray with shape (nsamples, nqoi)
+
+    variables : IndependentMarginalsVariable
+        A set of independent univariate random variables
+
+    degree : integer
+        Maximum degree of the polynomial basis
+
+    samples : np.ndarray
+        Samples at which the RHS and the PCE are evaluated
+
+    no_samp : integer
+        If samples is not provided, draw this many samples from variables
+
+    sketch_sz : integer
+        Number of rows in the sketch
+
+    no_trials : integer
+        Number of trials used when sketching the low-fidelity data
+
+    sketch_type : string
+        The booster to use. The following are available:
+        "uniform" (UniformBifiBooster),
+        "leverage_score" (LeverageScoreBifiBooster)
+
+    Returns
+    -------
+    result : :class:`pyapprox.surrogates.approximate.ApproximateResult`
+        Result object with the following attributes
+
+    approx : :class:`pyapprox.surrogates.polychaos.gpc.PolynomialChaosExpansion`
+        The PCE approximation
+    """
+    assert degree is not None, \
+        "Must set the maximum degree of the polynomial basis"
+    assert samples is not None or no_samp is not None, \
+        "Must provide samples or the number of samples"
+    assert sketch_sz is not None, "Must set the number of rows to be sketched"
+    assert no_trials is not None, \
+        "Must set the number of trials for low-fidelity sketching"
+
+    var_trans = AffineTransform(variables)
+    poly_opts = define_poly_options_from_variable_transformation(var_trans)
+    # if no samples are provided, draw them from the variable distribution
+    if samples is None:
+        samples = variables.rvs(no_samp)
+
+    pce = PolynomialChaosExpansion()
+    pce.configure(poly_opts)
+    # TODO: extend to other types of indices
+    indices = compute_hyperbolic_indices(pce.num_vars(), degree)
+    pce.set_indices(indices)
+    pce.basis_matrix(samples)
+    pce = fit_pce_with_bf_boosting(
+        pce, model_ensemble, samples, sketch_sz, no_trials, sketch_type)
+
+    return ApproximateResult({'approx': pce})
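[Editor's note: the boosted path is invoked the same way but takes a model ensemble and a trial count. Again a sketch using the names from the example script:

    res = adaptive_approximate(
        model_ensemble_cheat, benchmark.variable, 'polynomial_chaos',
        options={'method': 'bf_boosted',
                 'options': {'degree': max_degree, 'samples': samples,
                             'no_trials': no_trials, 'sketch_sz': sketch_sz,
                             'sketch_type': 'leverage_score'}})
]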

 def adaptive_approximate_polynomial_chaos_leja(
     fun, variables,
@@ -948,7 +1077,6 @@ def approximate_neural_network(train_samples, train_vals,
                                opts=optimizer_opts)
     return ApproximateResult({'approx': network})

-
 def approximate(train_samples, train_vals, method, options=None):
     r"""
     Approximate a scalar or vector-valued function of one or
@@ -1625,7 +1753,7 @@ def _expanding_basis_pce(pce, train_samples, train_vals, hcross_strength=1,

     return pce, best_cv_score, best_reg_param

-def approximate_fixed_pce(pce, train_samples, train_vals, indices,
+def approximate_fixed_pce(pce, train_samples, train_vals, indices=None,
                           verbose=1, solver_type='lasso',
                           linear_solver_options={}):
     r"""
@@ -1670,6 +1798,7 @@ def approximate_fixed_pce(pce, train_samples, train_vals, indices,
     coefs = []
     if type(linear_solver_options) == dict:
         linear_solver_options = [linear_solver_options]*nqoi
+    assert indices is not None, 'indices must be specified for a fixed PCE'
     if type(indices) == np.ndarray:
         indices = [indices.copy() for ii in range(nqoi)]
     unique_indices = []
diff --git a/pyapprox/surrogates/polychaos/bf_boosting.py b/pyapprox/surrogates/polychaos/bf_boosting.py
new file mode 100644
index 00000000..76343159
--- /dev/null
+++ b/pyapprox/surrogates/polychaos/bf_boosting.py
@@ -0,0 +1,188 @@
+"""Implement the bi-fidelity boosted sketching algorithms of
+https://arxiv.org/abs/2209.05705 in the pyapprox framework."""
+import numpy as np
+import scipy as sp
+
+from pyapprox.interface.wrappers import ModelEnsemble
+
+
+class RightHandSideWithMemory:
+    """Right-hand side function with memoized evaluations."""
+    def __init__(self, values, b):
+        self.values = values
+        if callable(b):
+            self.b = b
+            self.mem_b = np.nan*np.ones(values.shape[1])
+        else:
+            self.b = None
+            self.mem_b = b.flatten()
+
+    def __getitem__(self, ix):
+        # ix must be an integer index array
+        if self.b is not None:
+            # evaluate b only at the requested indices not yet cached
+            mask = np.isnan(self.mem_b[ix])
+            if mask.any():
+                self.mem_b[ix[mask]] = self.b(
+                    self.values[:, ix[mask]]).flatten()
+        return self.mem_b[ix]
+
+
+class AbstractSketcher:
+    """Abstract class that solves a sketched least-squares problem."""
+    def __init__(self, values, A, b):
+        self.A = A
+        self.b = RightHandSideWithMemory(values, b)
+
+    def solve_sketched_lsq(self, sketch_sz, sketch_ix=None, scaling=None):
+        if sketch_ix is None or scaling is None:
+            sketch_ix, scaling = self._get_sketch_ix_and_scaling(sketch_sz)
+        assert sketch_ix is not None and scaling is not None
+        A_sketched, b_sketched = self._sketch_A_and_b(
+            self.A, self.b, sketch_ix, scaling)
+        xstar = np.linalg.lstsq(A_sketched, b_sketched, rcond=None)[0]
+        # pce.set_coefficients expects a matrix of shape (L, 1)
+        return xstar.reshape((len(xstar), 1))
+
+    def _sketch_A_and_b(self, A, b, sketch_ix, scaling):
+        # b may be self.b or, in the boosters, the high-fidelity RHS
+        A_sketched = A[sketch_ix, :] * scaling[:, np.newaxis]
+        b_sketched = b[sketch_ix] * scaling
+        return A_sketched, b_sketched
+
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        raise NotImplementedError
+
+
+class QRSketcher(AbstractSketcher):
+    """Deterministic sketcher that selects rows of A via pivoted QR of its
+    transpose."""
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        no_rows, no_cols = self.A.shape
+        assert sketch_sz < no_rows, \
+            "Sketch size must be smaller than the number of rows"
+        assert sketch_sz < no_cols, \
+            "Sketch size is assumed to be smaller than the number of columns"
+        qr_time = int(np.ceil(sketch_sz / no_cols))
+        ix_use = np.arange(no_rows)
+        sketch_ix = np.zeros(no_cols*qr_time, np.int32)
+        for i in range(qr_time):
+            # pivoted QR of the remaining rows; the pivots identify the
+            # most important rows
+            _, _, p = sp.linalg.qr(
+                self.A[ix_use, :].T, mode='economic', pivoting=True)
+            sketch_ix[i*no_cols:(i+1)*no_cols] = ix_use[p[:no_cols]]
+            ix_use = np.setdiff1d(ix_use, ix_use[p[:no_cols]])
+        sketch_ix = sketch_ix[:sketch_sz]
+        return sketch_ix, np.ones(len(sketch_ix))
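[Editor's note: the two random sketchers below share the standard importance-sampling weights for sketched least squares: a row drawn with probability p_i that appears c_i times in a sample of size m is kept once with weight sqrt(c_i/(m p_i)), which makes the sketched normal matrix unbiased, E[(SA)^T (SA)] = A^T A. A self-contained numpy check of that identity, not part of the patch:

    import numpy as np

    rng = np.random.default_rng(0)
    n, d, m, reps = 200, 5, 50, 2000
    A = rng.standard_normal((n, d))
    p = np.ones(n) / n                      # uniform row probabilities
    acc = np.zeros((d, d))
    for _ in range(reps):
        ix = rng.choice(n, size=m, p=p)
        uix, counts = np.unique(ix, return_counts=True)
        w = np.sqrt(counts / (m * p[uix]))  # the scaling used below
        SA = A[uix] * w[:, None]
        acc += SA.T @ SA
    # relative deviation from A^T A shrinks as reps grows
    print(np.linalg.norm(acc/reps - A.T @ A) / np.linalg.norm(A.T @ A))
]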
+
+
+class UniformSketcher(AbstractSketcher):
+    """Sample rows uniformly at random."""
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        no_rows_total = self.A.shape[0]
+        samp_ix = np.random.choice(
+            range(no_rows_total), size=sketch_sz, replace=True)
+        unique_samp_ix, counts = np.unique(samp_ix, return_counts=True)
+        p_unif = np.ones(len(unique_samp_ix)) / no_rows_total
+        scaling = np.sqrt(counts / (sketch_sz * p_unif))
+        return unique_samp_ix, scaling
+
+
+class LeverageScoreSketcher(AbstractSketcher):
+    """Sample rows proportionally to their leverage scores."""
+    def __init__(self, values, A, b_lofi):
+        super().__init__(values, A, b_lofi)
+        Q, _ = np.linalg.qr(self.A)
+        # the sampling distribution is proportional to the squared row norms
+        # of the orthogonal factor of A, i.e. the leverage scores
+        ell = np.sum(Q**2, axis=1)
+        self.p = ell / ell.sum()
+
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        no_rows_total = self.A.shape[0]
+        samp_ix = np.random.choice(
+            range(no_rows_total), size=sketch_sz, replace=True, p=self.p)
+        unique_samp_ix, counts = np.unique(samp_ix, return_counts=True)
+        # the importance-sampling weights use the actual sampling
+        # probabilities of the selected rows
+        scaling = np.sqrt(counts / (sketch_sz * self.p[unique_samp_ix]))
+        return unique_samp_ix, scaling
+
+
+class AbstractBiFiBooster(AbstractSketcher):
+    """Abstract class implementing the boosting algorithm with bi-fidelity
+    data."""
+    def __init__(self, values, A, model_ensemble):
+        if not isinstance(model_ensemble, ModelEnsemble):
+            raise TypeError(
+                "'model_ensemble' must be of type ModelEnsemble, "
+                f"it is of type {type(model_ensemble)}")
+        if ('hi' not in model_ensemble.names) or \
+                ('lo' not in model_ensemble.names):
+            raise TypeError(
+                "'model_ensemble' must be a ModelEnsemble with models "
+                "named 'lo' and 'hi'")
+        loIx = model_ensemble.names.index('lo')
+        hiIx = model_ensemble.names.index('hi')
+        b_lofi = model_ensemble.functions[loIx]
+        b_hifi = model_ensemble.functions[hiIx]
+        self.all_ix = np.arange(values.shape[1])
+        super().__init__(values, A, b_lofi)
+        self.b_hifi = RightHandSideWithMemory(values, b_hifi)
+
+    def solve_sketched_lsq(self, sketch_sz, no_trials):
+        no_coeff = self.A.shape[1]
+        # coefficient vectors from the low-fidelity sketches
+        IX = [None for _ in range(no_trials)]
+        X = np.zeros([no_coeff, no_trials])
+        # sketch the low-fidelity problem no_trials times
+        for trial in range(no_trials):
+            IX[trial], scaling = self._get_sketch_ix_and_scaling(sketch_sz)
+            X[:, trial] = AbstractSketcher.solve_sketched_lsq(
+                self, sketch_sz, IX[trial], scaling).flatten()
+        # select the sketch whose solution has the smallest residual on the
+        # full low-fidelity problem
+        abs_err = np.array(
+            [np.linalg.norm(
+                np.dot(self.A, X[:, trial]) - self.b[self.all_ix], 2)
+             for trial in range(no_trials)])
+        ix_best = np.argmin(abs_err)
+        # solve the sketched problem with the high-fidelity data on the
+        # selected rows (a constant row scaling does not change the solution)
+        best_ix = IX[ix_best]
+        A_sketched, b_sketched = self._sketch_A_and_b(
+            self.A, self.b_hifi, best_ix, np.ones(len(best_ix))/len(best_ix))
+        xstar = np.linalg.lstsq(A_sketched, b_sketched, rcond=None)[0]
+        # pce.set_coefficients expects a matrix of shape (L, 1)
+        return xstar.reshape((len(xstar), 1))
+
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        raise NotImplementedError
+
+
+class UniformBifiBooster(AbstractBiFiBooster, UniformSketcher):
+    """Bi-fidelity boosting with uniform row sampling."""
+    def __init__(self, values, A, model_ensemble):
+        AbstractBiFiBooster.__init__(self, values, A, model_ensemble)
+
+    def solve_sketched_lsq(self, sketch_sz, no_trials):
+        return AbstractBiFiBooster.solve_sketched_lsq(
+            self, sketch_sz, no_trials)
+
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        return UniformSketcher._get_sketch_ix_and_scaling(self, sketch_sz)
+
+
+class LeverageScoreBifiBooster(AbstractBiFiBooster, LeverageScoreSketcher):
+    """Bi-fidelity boosting with leverage-score row sampling."""
+    def __init__(self, values, A, model_ensemble):
+        AbstractBiFiBooster.__init__(self, values, A, model_ensemble)
+
+    def solve_sketched_lsq(self, sketch_sz, no_trials):
+        return AbstractBiFiBooster.solve_sketched_lsq(
+            self, sketch_sz, no_trials)
+
+    def _get_sketch_ix_and_scaling(self, sketch_sz):
+        return LeverageScoreSketcher._get_sketch_ix_and_scaling(
+            self, sketch_sz)
+
+
+def fit_pce_with_sketch(pce, fun, nodes, sketch_sz, sketch_type):
+    """Given a polynomial chaos expansion and a function to approximate,
+    solve for the coefficients using a row sketch of the basis matrix."""
+    sketch_types = {
+        "qr": QRSketcher,
+        "uniform": UniformSketcher,
+        "leverage_score": LeverageScoreSketcher,
+    }
+    sketcher = sketch_types[sketch_type](nodes, pce.basis_matrix(nodes), fun)
+    coeff = sketcher.solve_sketched_lsq(sketch_sz)
+    pce.set_coefficients(coeff)
+    return pce
+
+
+def fit_pce_with_bf_boosting(pce, model_ensemble, nodes, sketch_sz,
+                             no_trials, sketch_type):
+    """Given a polynomial chaos expansion and a model ensemble containing
+    correlated low- and high-fidelity models, solve for the coefficients
+    using a boosted row sketch of the basis matrix."""
+    booster_types = {
+        "uniform": UniformBifiBooster,
+        "leverage_score": LeverageScoreBifiBooster
+    }
+    booster = booster_types[sketch_type](
+        nodes, pce.basis_matrix(nodes), model_ensemble)
+    coeff = booster.solve_sketched_lsq(sketch_sz, no_trials)
+    pce.set_coefficients(coeff)
+    return pce
\ No newline at end of file

From 998644847dc4a704044cb9b74c65291972adf7dd Mon Sep 17 00:00:00 2001
From: Nic Rummel
Date: Wed, 19 Jun 2024 13:03:44 -0600
Subject: [PATCH 2/2] Update .gitignore to ignore all __pycache__ dirs, even
 in subdirectories

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 1476cad4..23b42595 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,3 +17,4 @@ pyapprox/cython/*.c
 .vscode/
 pyapprox/__pycache__/
+**/__pycache__/
\ No newline at end of file
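[Editor's note: for readers who want the boosting idea in isolation, here is a standalone numpy sketch of the selection step implemented by AbstractBiFiBooster. It is an illustration under simplified assumptions (sampling without replacement and no importance weights), not part of the patch:

    import numpy as np

    rng = np.random.default_rng(0)
    n, d, m, no_trials = 400, 20, 30, 50
    A = rng.standard_normal((n, d))
    x_true = rng.standard_normal(d)
    b_hi = A @ x_true + 0.01 * rng.standard_normal(n)  # expensive model
    b_lo = b_hi + 0.1 * rng.standard_normal(n)         # cheap, correlated

    def sketched_solve(b, ix):
        return np.linalg.lstsq(A[ix], b[ix], rcond=None)[0]

    # boosting: keep the sketch whose low-fidelity solution has the
    # smallest residual on the full low-fidelity problem
    best_ix, best_res = None, np.inf
    for _ in range(no_trials):
        ix = rng.choice(n, size=m, replace=False)
        res = np.linalg.norm(A @ sketched_solve(b_lo, ix) - b_lo)
        if res < best_res:
            best_ix, best_res = ix, res

    x_boost = sketched_solve(b_hi, best_ix)  # one hi-fi sketched solve
    x_plain = sketched_solve(b_hi, rng.choice(n, size=m, replace=False))
    print("boosted:", np.linalg.norm(A @ x_boost - b_hi))
    print("plain:  ", np.linalg.norm(A @ x_plain - b_hi))

Only sketch-sized subsets of b_hi are ever evaluated in the boosted path, which is the cost saving the paper targets.]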