diff --git a/docs/404.html b/docs/404.html
index dff5039..c2b3416 100644
--- a/docs/404.html
+++ b/docs/404.html
@@ -88,7 +88,7 @@
       modeltime.ensemble
-      0.4.0
+      0.4.2
@@ -96,7 +96,7 @@
Next, we select a forecast horizon of 24 days and extend the data frame with the function future_frame(). This creates a future dataset, which we can distinguish from the historical data because its values will be NA.
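For context, here is a minimal sketch of that step, assuming a tibble data_tbl with a daily date column and a numeric value column (hypothetical names used for illustration):

# A sketch, not the vignette's exact code: extend `data_tbl` 24 days into the future.
library(dplyr)
library(timetk)

full_tbl <- data_tbl %>%
    future_frame(
        .date_var   = date,   # date column (assumed name)
        .length_out = 24,     # 24 periods ahead = 24 days for daily data
        .bind_data  = TRUE    # append the future rows to the historical rows
    )

# The appended future rows carry NA in `value`, which is how we tell the
# future dataset apart from the historical observations.
full_tbl %>% tail(24)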
library(tidymodels)
library(modeltime)
library(modeltime.ensemble)
-library(tidyverse)
+library(tidyverse)
library(timetk)
NEWS.md

Updates for modeltime 0.6.0 and parsnip 0.1.6 to align with xgboost upgrades.

Recursive Ensembles
-#> ── Attaching packages ──────────────────────── tidymodels 0.1.2 ──
-#> ✓ broom     0.7.5       ✓ recipes   0.1.15
-#> ✓ dials     0.0.9.9000  ✓ rsample   0.0.9
-#> ✓ dplyr     1.0.5       ✓ tibble    3.1.0
-#> ✓ ggplot2   3.3.3       ✓ tidyr     1.1.3
-#> ✓ infer     0.5.4       ✓ tune      0.1.3
-#> ✓ modeldata 0.1.0       ✓ workflows 0.2.2
-#> ✓ parsnip   0.1.5       ✓ yardstick 0.0.8
-#> ✓ purrr     0.3.4
+#> ── Attaching packages ──────────────────────── tidymodels 0.1.3 ──
+#> ✓ broom     0.7.8       ✓ recipes      0.1.16
+#> ✓ dials     0.0.9.9000  ✓ rsample      0.1.0
+#> ✓ dplyr     1.0.7       ✓ tibble       3.1.2
+#> ✓ ggplot2   3.3.5       ✓ tidyr        1.1.3
+#> ✓ infer     0.5.4       ✓ tune         0.1.5
+#> ✓ modeldata 0.1.0       ✓ workflows    0.2.2
+#> ✓ parsnip   0.1.6       ✓ workflowsets 0.0.2
+#> ✓ purrr     0.3.4       ✓ yardstick    0.0.8
#> ── Conflicts ─────────────────────────── tidymodels_conflicts() ──
#> x purrr::discard() masks scales::discard()
#> x dplyr::filter()  masks stats::filter()
#> x dplyr::lag()     masks stats::lag()
#> x recipes::step()  masks stats::step()
+#> • Use tidymodels_prefer() to resolve common conflicts.
library(modeltime)
-#> ── Attaching packages ──────────────────────── tidyverse 1.3.0 ──
+#> ── Attaching packages ──────────────────────── tidyverse 1.3.1 ──
#> ✓ readr   1.4.0   ✓ forcats 0.5.1
#> ✓ stringr 1.4.0
#> ── Conflicts ─────────────────────────── tidyverse_conflicts() ──
#> x readr::col_factor() masks scales::col_factor()
#> x purrr::discard()    masks scales::discard()
#> x dplyr::filter()     masks stats::filter()
#> x stringr::fixed()    masks recipes::fixed()
#> x dplyr::lag()        masks stats::lag()
#> x readr::spec()       masks yardstick::spec()
library(timetk)

# Make an ensemble from a Modeltime Table
ensemble_fit <- m750_models %>%
@@ -276,12 +278,12 @@
#> Ensemble of 3 Models (MEAN)
#>
#> # Modeltime Table
#> # A tibble: 3 x 3
#>   .model_id .model     .model_desc
#>       <int> <list>     <chr>
#> 1         1 <workflow> ARIMA(0,1,1)(0,1,1)[12]
#> 2         2 <workflow> PROPHET
#> 3         3 <workflow> GLMNET

# Forecast with the Ensemble
modeltime_table(
    ensemble_fit
@@ -294,8 +296,16 @@
    .interactive = FALSE,
    .conf_interval_show = FALSE
)
+#> Warning: 'keep_original_cols' was added to `step_dummy()` after this recipe was created.
+#> Regenerate your recipe to avoid this warning.
+#> (the same warning is repeated 7 times in the regenerated output)
# }
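That example is split across diff hunks. For reference, a self-contained sketch of the same mean-ensemble workflow, using the m750, m750_splits, and m750_models example objects shipped with modeltime (an illustrative reconstruction, not the verbatim documentation code):

# Sketch: average the three submodels stored in the example Modeltime Table,
# then forecast over the test split. Calibration is included so the code runs
# cleanly end to end; it is not required when confidence intervals are hidden.
library(tidymodels)
library(modeltime)
library(modeltime.ensemble)
library(timetk)

ensemble_fit <- m750_models %>%
    ensemble_average(type = "mean")

modeltime_table(ensemble_fit) %>%
    modeltime_calibrate(new_data = testing(m750_splits)) %>%
    modeltime_forecast(
        new_data    = testing(m750_splits),
        actual_data = m750
    ) %>%
    plot_modeltime_forecast(
        .interactive        = FALSE,
        .conf_interval_show = FALSE
    )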
To run these steps in parallel, first register a parallel backend such as doFuture. Then set control = control_grid(allow_par = TRUE).
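A sketch of that setup, assuming submodel_predictions is the table of cross-validation predictions built in the example below (the worker count and tuning grid are arbitrary choices):

# Hypothetical parallel setup; assumes the tidymodels / modeltime.ensemble
# libraries from the surrounding example are loaded.
library(doFuture)

registerDoFuture()                 # register doFuture as the foreach backend
plan(multisession, workers = 4)    # adjust the worker count to your machine

ensemble_fit_par <- submodel_predictions %>%
    ensemble_model_spec(
        model_spec = linear_reg(penalty = tune(), mixture = tune()) %>%
            set_engine("glmnet"),
        grid    = 10,
        control = control_grid(allow_par = TRUE, verbose = TRUE)
    )

plan(sequential)                   # return to sequential processing when done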
+# \donttest{
library(tidymodels)
library(modeltime)
library(modeltime.ensemble)
library(tidyverse)
library(timetk)
-# \donttest{

# Step 1: Make resample predictions for submodels
resamples_tscv <- training(m750_splits) %>%
    time_series_cv(
@@ -337,7 +337,7 @@
        control = control_resamples(verbose = TRUE)
    )
#> ── Fitting Resamples ────────────────────────────────────────────
#>
-#> ● Model ID: 1 ARIMA(0,1,1)(0,1,1)[12]
+#> • Model ID: 1 ARIMA(0,1,1)(0,1,1)[12]
#> i Slice1: preprocessor 1/1
+#> ! Slice1: preprocessor 1/1: 'keep_original_cols' was added to `step_dummy()` after...
#> ✓ Slice1: preprocessor 1/1
#> i Slice1: preprocessor 1/1, model 1/1
#> frequency = 12 observations per 1 year
#> ✓ Slice1: preprocessor 1/1, model 1/1
#> i Slice1: preprocessor 1/1, model 1/1 (predictions)
-#> ● Model ID: 2 PROPHET
+#> • Model ID: 2 PROPHET
#> i Slice1: preprocessor 1/1
+#> ! Slice1: preprocessor 1/1: 'keep_original_cols' was added to `step_dummy()` after...
#> ✓ Slice1: preprocessor 1/1
#> i Slice1: preprocessor 1/1, model 1/1
#> Disabling weekly seasonality. Run prophet with weekly.seasonality=TRUE to override this.
#> Disabling daily seasonality. Run prophet with daily.seasonality=TRUE to override this.
#> ✓ Slice1: preprocessor 1/1, model 1/1
#> i Slice1: preprocessor 1/1, model 1/1 (predictions)
-#> ● Model ID: 3 GLMNET
+#> • Model ID: 3 GLMNET
#> i Slice1: preprocessor 1/1
+#> ! Slice1: preprocessor 1/1: 'keep_original_cols' was added to `step_dummy()` after...
#> ✓ Slice1: preprocessor 1/1
#> i Slice1: preprocessor 1/1, model 1/1
#> ✓ Slice1: preprocessor 1/1, model 1/1
#> i Slice1: preprocessor 1/1, model 1/1 (predictions)
-#> 4.245 sec elapsed
+#> 3.972 sec elapsed
#>

# Step 2: Metalearner ----
@@ -347,18 +347,18 @@
        model_spec = linear_reg() %>% set_engine("lm"),
        control = control_grid(verbose = TRUE)
    )
#> ── Fitting Non-Tunable Model Specification ──────────────────────
#> ℹ Fitting model spec to submodel cross-validation predictions.
#>
#> ℹ Prediction Error Comparison:
#> # A tibble: 4 x 3
#>   .model_id  rmse .model_desc
#>   <chr>     <dbl> <chr>
#> 1 1          579. ARIMA(0,1,1)(0,1,1)[12]
#> 2 2          381. PROPHET
#> 3 3          548. GLMNET
#> 4 ensemble   139. ENSEMBLE (MODEL SPEC)
#>
#> ── Final Model ──────────────────────────────────────────────────
#>
#> ℹ Model Workflow:
#> ══ Workflow [trained] ═══════════════════════════════════════════
#> Preprocessor: Recipe
#> Model: linear_reg()
#>
#> ── Preprocessor ─────────────────────────────────────────────────
#> 0 Recipe Steps
#>
@@ -373,19 +373,19 @@
#>  -2684.9718       0.4702      -0.0196       0.7962
#>
#>
-#> 0.346 sec elapsed
+#> 0.383 sec elapsed
#>
ensemble_fit_lm
#> ── Modeltime Ensemble ───────────────────────────────────────────
#> Ensemble of 3 Models (LM STACK)
#>
#> # Modeltime Table
#> # A tibble: 3 x 3
#>   .model_id .model     .model_desc
#>       <int> <list>     <chr>
#> 1         1 <workflow> ARIMA(0,1,1)(0,1,1)[12]
#> 2         2 <workflow> PROPHET
#> 3         3 <workflow> GLMNET

# * With Metalearner Tuning ----
ensemble_fit_glmnet <- submodel_predictions %>%
    ensemble_model_spec(
@@ -397,97 +397,97 @@
        grid = 2,
        control = control_grid(verbose = TRUE)
    )
#> ── Tuning Model Specification ───────────────────────────────────
#> ℹ Performing 5-Fold Cross Validation.
#>
#> i Fold1: preprocessor 1/1
#> ✓ Fold1: preprocessor 1/1
#> i Fold1: preprocessor 1/1, model 1/2
#> ✓ Fold1: preprocessor 1/1, model 1/2
#> i Fold1: preprocessor 1/1, model 1/2 (predictions)
#> i Fold1: preprocessor 1/1, model 2/2
#> ✓ Fold1: preprocessor 1/1, model 2/2
#> i Fold1: preprocessor 1/1, model 2/2 (predictions)
#> (the same preprocessor/model messages repeat for Fold2 through Fold5)
#> ✓ Finished tuning Model Specification.
#>
#> ℹ Model Parameters:
#> # A tibble: 1 x 8
-#>        penalty mixture .metric .estimator  mean     n std_err .config
-#>          <dbl>   <dbl> <chr>   <chr>      <dbl> <int>   <dbl> <chr>
-#> 1 0.0000000202   0.461 rmse    standard    152.     5    15.2 Preprocessor1_Mod…
+#>       penalty mixture .metric .estimator  mean     n std_err .config
+#>         <dbl>   <dbl> <chr>   <chr>      <dbl> <int>   <dbl> <chr>
+#> 1 0.000000325   0.166 rmse    standard    152.     5    12.4 Preprocessor1_Mode…
#>
#> ℹ Prediction Error Comparison:
#> # A tibble: 4 x 3
#>   .model_id  rmse .model_desc
#>   <chr>     <dbl> <chr>
#> 1 1          579. ARIMA(0,1,1)(0,1,1)[12]
#> 2 2          381. PROPHET
#> 3 3          548. GLMNET
#> 4 ensemble   139. ENSEMBLE (MODEL SPEC)
#>
#> ── Final Model ──────────────────────────────────────────────────
#>
#> ℹ Model Workflow:
#> ══ Workflow [trained] ═══════════════════════════════════════════
#> Preprocessor: Recipe
#> Model: linear_reg()
#>
#> ── Preprocessor ─────────────────────────────────────────────────
#> 0 Recipe Steps
#>
#> ── Model ────────────────────────────────────────────────────────
#>
-#> Call:  glmnet::glmnet(x = maybe_matrix(x), y = y, family = "gaussian", alpha = ~0.460740306870775)
+#> Call:  glmnet::glmnet(x = maybe_matrix(x), y = y, family = "gaussian", alpha = ~0.166370713403474)
#>
-#>    Df  %Dev  Lambda
-#> 1   0  0.00 1198.00
-#> 2   2  8.52 1091.00
-#> 3   3 18.69  994.50
-#> (rows 4-46 of the regularization path omitted; it continues down to Lambda 18.21)
+#>    Df  %Dev Lambda
+#> 1   0  0.00 3317.0
+#> 2   3  4.07 3023.0
+#> 3   3 10.33 2754.0
+#> (rows 4-46 of the regularization path omitted; it continues down to Lambda 50.4)
#>
#> ...
-#> and 19 more lines.
+#> and 33 more lines.
#>
-#> 3.669 sec elapsed
+#> 3.99 sec elapsed
#>
ensemble_fit_glmnet
#> ── Modeltime Ensemble ───────────────────────────────────────────
#> Ensemble of 3 Models (GLMNET STACK)
#>
#> # Modeltime Table
#> # A tibble: 3 x 3
#>   .model_id .model     .model_desc
#>       <int> <list>     <chr>
#> 1         1 <workflow> ARIMA(0,1,1)(0,1,1)[12]
#> 2         2 <workflow> PROPHET
#> 3         3 <workflow> GLMNET
# }
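The stacked-ensemble example above is spread across several hunks; here is a condensed sketch of the two-step workflow it demonstrates. The resampling settings shown are illustrative assumptions, not necessarily the values elided by the diff:

# A sketch of the stacked-ensemble workflow (not the verbatim documentation
# example); the time_series_cv() settings are illustrative.
library(tidymodels)
library(modeltime)
library(modeltime.ensemble)
library(timetk)

# Step 1: Cross-validation predictions for each submodel
resamples_tscv <- training(m750_splits) %>%
    time_series_cv(
        assess      = "2 years",   # assumed horizon per slice
        initial     = "5 years",   # assumed training window
        skip        = "2 years",
        slice_limit = 1
    )

submodel_predictions <- m750_models %>%
    modeltime_fit_resamples(
        resamples = resamples_tscv,
        control   = control_resamples(verbose = TRUE)
    )

# Step 2a: Non-tunable metalearner (plain linear regression)
ensemble_fit_lm <- submodel_predictions %>%
    ensemble_model_spec(
        model_spec = linear_reg() %>% set_engine("lm"),
        control    = control_grid(verbose = TRUE)
    )

# Step 2b: Tuned metalearner (elastic net via glmnet)
ensemble_fit_glmnet <- submodel_predictions %>%
    ensemble_model_spec(
        model_spec = linear_reg(penalty = tune(), mixture = tune()) %>%
            set_engine("glmnet"),
        kfolds  = 5,
        grid    = 2,
        control = control_grid(verbose = TRUE)
    )

Stacking works because the metalearner is trained on the submodels' out-of-sample resample predictions; in the output above that is what drives the ensemble RMSE of roughly 139 versus 381 for the best single submodel.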
+# \donttest{
library(tidymodels)
library(modeltime)
library(modeltime.ensemble)
library(tidyverse)
library(timetk)

# Make an ensemble from a Modeltime Table
@@ -261,12 +262,12 @@
#> Ensemble of 3 Models (WEIGHTED)
#>
#> # Modeltime Table
#> # A tibble: 3 x 4
#>   .model_id .model     .model_desc             .loadings
#>       <int> <list>     <chr>                       <dbl>
#> 1         1 <workflow> ARIMA(0,1,1)(0,1,1)[12]     0.429
#> 2         2 <workflow> PROPHET                     0.429
#> 3         3 <workflow> GLMNET                      0.143

# Forecast with the Ensemble
modeltime_table(
    ensemble_fit
@@ -279,8 +280,16 @@
    .interactive = FALSE,
    .conf_interval_show = FALSE
)
+#> Warning: 'keep_original_cols' was added to `step_dummy()` after this recipe was created.
+#> Regenerate your recipe to avoid this warning.
+#> (the same warning is repeated 7 times in the regenerated output)
# }
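For reference, a sketch showing how loadings like those above (0.429, 0.429, 0.143) arise: ensemble_weighted() rescales the supplied loadings to sum to one, so weights of 3, 3, and 1 become 3/7, 3/7, and 1/7. The particular weights here are an assumption chosen to reproduce the printed values:

# Illustrative weighted ensemble (continues from the libraries loaded above);
# the loadings 3, 3, 1 rescale to 3/7 ≈ 0.429, 3/7 ≈ 0.429, 1/7 ≈ 0.143.
ensemble_fit_wt <- m750_models %>%
    ensemble_weighted(
        loadings       = c(3, 3, 1),  # ARIMA, PROPHET, GLMNET
        scale_loadings = TRUE         # rescale so the weights sum to 1
    )

ensemble_fit_wt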