diff --git a/README.md b/README.md
index 6f04f2b..496f6db 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-# Cognitive Models
+# Cognitive Models
*Computational Modeling of Cognitive Processes with Bayesian Mixed Models in Julia*
[![](https://img.shields.io/badge/status-looking_for_collaborators-orange)](https://github.com/DominiqueMakowski/CognitiveModels/issues)
-[![](https://img.shields.io/badge/access-open-green)](https://dominiquemakowski.github.io/CognitiveModels/)
+[![](https://img.shields.io/badge/access-open-brightgreen)](https://dominiquemakowski.github.io/CognitiveModels/)
The goal of this project is to write an open-access book on **cognitive models**, i.e., statistical models that best fit **psychological data** (e.g., reaction times, survey scales, ...).
This framework aims to move beyond a mere description of the data and to make inferences about the underlying cognitive processes that generated it.
@@ -18,20 +18,10 @@ Importantly, it is currently the only language in which we can fit all the cogni
Unfortunately, cognitive models often involve distributions for which Frequentist estimation is not yet implemented, and they usually contain many parameters (due to the presence of **random effects**), which causes traditional algorithms to fail to converge.
Simply put, the Bayesian approach is the only one currently robust enough to fit these complex models.
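
As an illustration, a simple "random intercept" model can be written in a few lines of Turing. The snippet below is a minimal sketch under assumed inputs (a vector `rt` of reaction times, an integer vector `subj` of participant indices, and the number of participants `n_subj`); the names and priors are purely illustrative, not a recommendation:

```julia
using Turing

@model function mixed_model(rt, subj, n_subj)
    # Population-level ("fixed") parameters
    μ ~ Normal(0.5, 0.5)
    σ ~ truncated(Normal(0, 0.5); lower=0)

    # Participant-level ("random") intercepts, partially pooled through τ
    τ ~ truncated(Normal(0, 0.3); lower=0)
    α ~ filldist(Normal(0, 1), n_subj)

    # Likelihood: each observation is shifted by its participant's intercept
    for i in eachindex(rt)
        rt[i] ~ Normal(μ + τ * α[subj[i]], σ)
    end
end

# chain = sample(mixed_model(rt, subj, n_subj), NUTS(), 1000)
```

Because the likelihood is just one line of the model, the same structure extends naturally to the less conventional distributions used throughout the book.
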
-## The Plan
-
-As this is a fast-evolving field (both from the theoretical - with new models being proposed - and the technical side - with improvements to the packages and the algorithms), the book needs to be future-resilient and updatable to keep up with the latest best practices.
-
-- [ ] Decide on the framework to build the book in a reproducible and collaborative manner (Quarto?)
-- [ ] Set up the infrastructure to automatically build it using GitHub actions and host it on GitHub pages
-- [ ] Write the content of the book
-- [ ] Referencing
- - Add Zenodo DOI and reference (but how to deal with evolving author? Through versioning?)
- - Publish a paper to present the book project ([JOSE](https://jose.theoj.org/))?
-
-
## Looking for Coauthors
+As this is a fast-evolving field (both theoretically, with new models being proposed, and technically, with ongoing improvements to the packages and algorithms), the book needs to be future-resilient and updatable by contributors to keep up with the latest best practices.
+
This project can only be achieved by a team, and I suspect that no single person currently has all the skills and knowledge needed to cover all the content. We need people with strengths in various areas, such as Julia/Turing, theory, writing, and plotting.
Most importantly, this project can serve as a way for us to learn more about this approach to psychological science.
@@ -40,3 +30,11 @@ Most importantly, this project can serve as a way for us to learn more about thi
## Content
See the current WIP [**table of contents**](https://dominiquemakowski.github.io/CognitiveModels/).
+
+- Fundamentals of Bayesian Modeling in Julia
+- On Predictors
+- Choices and Scales
+- Reaction Times
+ - [**Descriptive Models**](https://dominiquemakowski.github.io/CognitiveModels/4a_rt_descriptive.html)
+ - [**Generative Models**](https://dominiquemakowski.github.io/CognitiveModels/4b_rt_generative_.html)
+- Individual Differences
\ No newline at end of file
diff --git a/content/.jupyter_cache/executed/cca5e989665b19a9f718c608e134ee05/base.ipynb b/content/.jupyter_cache/executed/cca5e989665b19a9f718c608e134ee05/base.ipynb
new file mode 100644
index 0000000..1e77120
--- /dev/null
+++ b/content/.jupyter_cache/executed/cca5e989665b19a9f718c608e134ee05/base.ipynb
@@ -0,0 +1,581 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a2148d5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import IJulia\n",
+ "\n",
+ "# The julia kernel has built in support for Revise.jl, so this is the \n",
+ "# recommended approach for long-running sessions:\n",
+ "# https://github.com/JuliaLang/IJulia.jl/blob/9b10fa9b879574bbf720f5285029e07758e50a5e/src/kernel.jl#L46-L51\n",
+ "\n",
+ "# Users should enable revise within .julia/config/startup_ijulia.jl:\n",
+ "# https://timholy.github.io/Revise.jl/stable/config/#Using-Revise-automatically-within-Jupyter/IJulia-1\n",
+ "\n",
+ "# clear console history\n",
+ "IJulia.clear_history()\n",
+ "\n",
+ "fig_width = 7\n",
+ "fig_height = 5\n",
+ "fig_format = :retina\n",
+ "fig_dpi = 96\n",
+ "\n",
+ "# no retina format type, use svg for high quality type/marks\n",
+ "if fig_format == :retina\n",
+ " fig_format = :svg\n",
+ "elseif fig_format == :pdf\n",
+ " fig_dpi = 96\n",
+ " # Enable PDF support for IJulia\n",
+ " IJulia.register_mime(MIME(\"application/pdf\"))\n",
+ "end\n",
+ "\n",
+ "# convert inches to pixels\n",
+ "fig_width = fig_width * fig_dpi\n",
+ "fig_height = fig_height * fig_dpi\n",
+ "\n",
+ "# Intialize Plots w/ default fig width/height\n",
+ "try\n",
+ " import Plots\n",
+ "\n",
+ " # Plots.jl doesn't support PDF output for versions < 1.28.1\n",
+ " # so use png (if the DPI remains the default of 300 then set to 96)\n",
+ " if (Plots._current_plots_version < v\"1.28.1\") & (fig_format == :pdf)\n",
+ " Plots.gr(size=(fig_width, fig_height), fmt = :png, dpi = fig_dpi)\n",
+ " else\n",
+ " Plots.gr(size=(fig_width, fig_height), fmt = fig_format, dpi = fig_dpi)\n",
+ " end\n",
+ "catch e\n",
+ " # @warn \"Plots init\" exception=(e, catch_backtrace())\n",
+ "end\n",
+ "\n",
+ "# Initialize CairoMakie with default fig width/height\n",
+ "try\n",
+ " import CairoMakie\n",
+ "\n",
+ " # CairoMakie's display() in PDF format opens an interactive window\n",
+ " # instead of saving to the ipynb file, so we don't do that.\n",
+ " # https://github.com/quarto-dev/quarto-cli/issues/7548\n",
+ " if fig_format == :pdf\n",
+ " CairoMakie.activate!(type = \"png\")\n",
+ " else\n",
+ " CairoMakie.activate!(type = string(fig_format))\n",
+ " end\n",
+ " CairoMakie.update_theme!(resolution=(fig_width, fig_height))\n",
+ "catch e\n",
+ " # @warn \"CairoMakie init\" exception=(e, catch_backtrace())\n",
+ "end\n",
+ " \n",
+ "# Set run_path if specified\n",
+ "try\n",
+ " run_path = raw\"C:\\Users\\domma\\Dropbox\\Software\\CognitiveModels\\content\"\n",
+ " if !isempty(run_path)\n",
+ " cd(run_path)\n",
+ " end\n",
+ "catch e\n",
+ " @warn \"Run path init:\" exception=(e, catch_backtrace())\n",
+ "end\n",
+ "\n",
+ "\n",
+ "# emulate old Pkg.installed beahvior, see\n",
+ "# https://discourse.julialang.org/t/how-to-use-pkg-dependencies-instead-of-pkg-installed/36416/9\n",
+ "import Pkg\n",
+ "function isinstalled(pkg::String)\n",
+ " any(x -> x.name == pkg && x.is_direct_dep, values(Pkg.dependencies()))\n",
+ "end\n",
+ "\n",
+ "# ojs_define\n",
+ "if isinstalled(\"JSON\") && isinstalled(\"DataFrames\")\n",
+ " import JSON, DataFrames\n",
+ " global function ojs_define(; kwargs...)\n",
+ " convert(x) = x\n",
+ " convert(x::DataFrames.AbstractDataFrame) = Tables.rows(x)\n",
+ " content = Dict(\"contents\" => [Dict(\"name\" => k, \"value\" => convert(v)) for (k, v) in kwargs])\n",
+ " tag = \"\"\n",
+ " IJulia.display(MIME(\"text/html\"), tag)\n",
+ " end\n",
+ "elseif isinstalled(\"JSON\")\n",
+ " import JSON\n",
+ " global function ojs_define(; kwargs...)\n",
+ " content = Dict(\"contents\" => [Dict(\"name\" => k, \"value\" => v) for (k, v) in kwargs])\n",
+ " tag = \"\"\n",
+ " IJulia.display(MIME(\"text/html\"), tag)\n",
+ " end\n",
+ "else\n",
+ " global function ojs_define(; kwargs...)\n",
+ " @warn \"JSON package not available. Please install the JSON.jl package to use ojs_define.\"\n",
+ " end\n",
+ "end\n",
+ "\n",
+ "\n",
+ "# don't return kernel dependencies (b/c Revise should take care of dependencies)\n",
+ "nothing\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "1f15e3d4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "500-element Vector{Float64}:\n",
+ " 81.58073519246274\n",
+ " 79.19946111443744\n",
+ " 87.79342923190207\n",
+ " 83.23447080637882\n",
+ " 93.35940320922718\n",
+ " 90.50630943343556\n",
+ " 91.29729051986914\n",
+ " 87.12142446009086\n",
+ " 126.06979270954818\n",
+ " 118.2606308830268\n",
+ " 89.46900234961619\n",
+ " 123.71700591444973\n",
+ " 109.08953092800505\n",
+ " ⋮\n",
+ " 95.98817836447981\n",
+ " 97.0917897794659\n",
+ " 86.07740528385362\n",
+ " 108.02034361748942\n",
+ " 100.48723048966762\n",
+ " 116.50504792471307\n",
+ " 104.59087818874254\n",
+ " 109.38353424289971\n",
+ " 107.52722875766088\n",
+ " 90.88820850513576\n",
+ " 98.26538878313197\n",
+ " 90.57554172244384"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#| output: false\n",
+ "#| code-fold: false\n",
+ "\n",
+ "using Turing, Distributions, Random\n",
+ "using Makie\n",
+ "\n",
+ "# Random sample from a Normal(μ=100, σ=15)\n",
+ "iq = rand(Normal(100, 15), 500)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "e3dbae1f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33m\u001b[1m┌ \u001b[22m\u001b[39m\u001b[33m\u001b[1mWarning: \u001b[22m\u001b[39mFound `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n",
+ "\u001b[33m\u001b[1m└ \u001b[22m\u001b[39m\u001b[90m@ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\u001b[39m\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABUAAAAPACAIAAAB7BESOAAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nOzdeZycdYEn/icBuS/l8EAQfjKiOAjC6igsO8i8WAeGEWdGhOU3O/5clhlRQcbFGdcFT0RZRAQHFOGlOHIf4VSQK5DOnTS500k66e70fd9d3V1dx++PdsomZ3dSVd96qt7vv6qfqn76U0kg+dT3mpXNZiMAAACgtM0OHQAAAADYPQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAGFHgAAACIAQUeAAAAYkCBBwAAgBhQ4AEAACAG9g0dAADKVm1t7SuvvLL99dmzZx966KFve9vbTj/99Le//e07/N4XXnjhy1/+8uTjyy+//Lvf/W4Bg+7EhRdeuGnTpsnHixcvPuqoo6I3v6kPfehDZ511VvGDfec73/nNb34z+fj222//q7/6q+JnAIDiU+ABoFCWLFly1VVX7fo1p5122vXXX/+Zz3xmm+vDw8NbtmyZfNzZ2VmQfLvT2NiYy5BKpSYfTH1TX/nKV4IU+K6urlywoaGh4gcAgCBMoQeAkFatWnXJJZd85StfyWazhbj/VVddddJ/qK6uLsSPKJD77rsvl/yuu+4KHQcAwjMCDwDh3XHHHe9973uvueaavN+5ra0tN1g9Ojqa9/sXTn9/fy55b29v2DAAUAoUeAAohpNOOqmqqmrycTqdXr9+/YIFC26++eaxsbHJi//7f//vT3/608cff/zklxdffHFfX9/k4/3337/4gaMoWrRoUTqdnnx8+OGHB8mwQ7fccsuNN944+fjggw8OGwYAikaBB4Bi2Geffd7xjnfkvjz22GPPP//8T37yk3/1V381WdQTicSdd9558803T75gYmJieHh48vHs2bMPPPDAbW44MTHR0NCwZcuWVCp14oknnnDCCTOtss3Nzbls73znO6MoGhkZWbZs2UEHHfTRj3508svc0vcjjjhiZ/fJZDLV1dWNjY1HHHHE6aeffuSRR27zgkQikRtCP+ywww477LCpz3Z1dY2Pj08+fuc737nPPvvsNvnY2NjIyMjk4+1/ZSZ1dHSsXbu2p6fn4IMPfs973vPBD35w1qxZ279s6k9/xzvese+++6bT6WXLlrW0tLzjHe845ZRT3vrWt+42DwAUSRYAKIzcTulRFJ188sk7fM3111+fe83RRx+dyWQmr//qV7/KXf/Xf/3Xqd/S19d3/fXXH3rooVP/Qp89e/Zll122du3a3Mu+973vnXnmmVOL98knn3zmmWc+8sgjky/IXT/yyCOz2eyjjz569NFHR1F0xRVXTL7gPe95T+41o6Oj27+pr3zlK6+99toJJ5yQu7LffvtdddVVyWRyauB77rlnZ+8lm83+2Z/9We7Z2trabDb7u9/97swzzzzuuONy19/1rnedeeaZ//Iv/zL5LZ/73OdyTz3//PPb3HDNmjXnnHPONnX96KOPvuuuu3K/vDl//ud/nnvNmjVrHnvssamfs8yaNevKK6/s7+/fye8wABSVTewAIKR/+qd/yo05d3V11dTU7Pr1w8PDZ5111o033rjN7uuZTObhhx8+/fTT586dO3mloaGhurq6v78/95qNGzdWV1fvcE/7F1544b/9t//W1dU1o/CrVq264IILGhoacleSyeTPfvazCy+8cHBwcEa3mqqnp6e6urqpqSl3pbW1tbq6OrckfhduuummM844o6qqKvvmTQG7urq++MUv/vmf/3lra+vOvveBBx645JJL2tvbc1ey2ew999zz6U9/eo/eBwDkmQIPACG9+93vfve73537cunSpbt+/TXXXDO15J988slnnXVWbpp3KpW65JJLJnv4CSecsMMR+GOOOWabe6ZSqauvvjq33H36XnvttcmN8fbff//Zs//4j4qXX345d4j9HjjyyCN3OAL/3ve+d9ff+MILL/yf//N/JiYmJr+cNWvWe9/73qkrC6qqqq6++uqdffsPf/jD3DdOvf7aa6/NmTNnpu8CAPJOgQeAwN71rnflHk8d/t2h5557bvLBkUceWVdXt2HDhgULFrS2tl5++eWT13t6el599dUoiq6//vrly5dPnSJ+7733Ll++/LOf/ew29xwYGNi8efMxxxxz6aWXXnvtteecc870wx9xxBFPPvnk4OBgT0/Pt771rdz1Bx98cDoD5jt0wQUXLF++/Ktf/WruylVXXbV8+fLcBgE7ND4+PrWc/+Vf/mV7e/vmzZsHBgbuvvvu/fbbb/L6nDlzXnrppR3eYfbs2TfccENTU1MymZw/f/7UpQGvv/76nr0XAMgjm9gBQGCTG8hNyu08v0Otra25We4HHHBA7hsPOOCAb3zjG1u3bp38cg8OXfvEJz7x1FNPbbO93HT85je/ueiii6Io2m+//b797W+3t7fffffdURSl0+lbb721mOe3P/7445s3b558fMIJJzzzzDNvectboijaZ599/vEf/7G9vT33+cLNN998/vnnb3+HL3/5y9/97ncnH5999tk33njj3//9309+uXHjxoK/AQDYHSPwABDY1OPZd31i3NRnW1paTjzxxC984QvPPPPM8PDwBz/4wfn/4aqrrppphp/85Cd70N7f9773Tbb3nOuuuy73ePny5TO94d6YN29e7vE111wz2d5zrr766txeA1OPx5vqb/7mb6Z+edppp+UeT91KAABCUeABILCp26ptfwbbVEceeeTUKfGTw90XX3zxkUceef7559922225k+FmZL/99vvgBz+4B9946qmnbnPlpJNOyq05z42HF8fUTe8+9KEPbfPsW9/61mOPPXbycSKR6O7u3v4ORx111NQvDzjggHxnBIC9osADQGAtLS25x7vdp+3xxx/fZtA7iqJkMvnyyy9/9atfPfHEE6+55prc4e3TdOihh07n9PXtTR47t43cMWx9fX3bbJVfUFN/1tvf/vbtXzB1qcLAwEAxMgFAXinwABDS3Llzc6PBs2bNOuuss3b9+qOOOurZZ5/duHHjN77xjVNOOWWbZ1Op1E9/+tNvfvObBcm6nR0eydbW1jb54OCDDz7kkEOKkySKoqlLAHYYbOoHJYceemgxMgFAXinwABDST3/609zjj33sY29729um813ve9/7vv/9769bt662tvaWW245++yzpz57zz335DnlTqxcuXKbxeRr165NJBKTj08++eRtzmOLoij3bM7UXr03TjzxxNzj6urqbZ5tb2/P/aCDDjpoh3MHAKDEKfAAEEY6nb766quffPLJ3JU
vfelLu/6Wm2666WP/YfHixVEUnXTSSdddd938+fOrq6tzJ6X19PTkzkIvqMbGxocffnjqle9///u5x7l19VM/ldhmZ7t58+bt2br97Z177rm5x3fcccfIyMjUZ3/4wx9ms9nJx+ecc86++zqIB4D48bcXABRDT0/PjTfeOPk4nU7X1NQsWbKkoaEh94Izzjjj0ksv3fVNjjrqqCVLlkw+vu6665588sncSPLg4GBu6fv73//+3B7ss2f/8cP6zs7OPLyTN/uf//N/Dg0NfepTnxoYGPjxj3+c6/OzZs269tprJx+ffPLJudcvWrTouuuuu+KKK/bZZ5+5c+def/31O7vzTJP/zd/8zYc+9KHVq1dHUdTe3n7uuefee++9p556ak9Pz09+8pPbb789F6xoSwwAIL8UeAAohu7u7htuuGFnzx566KH//u//vtth4QsvvPCII46YPNJswYIF73rXu0444YT999+/s7Mzdz58FEWf/OQnc49zW69HUfTFL37x/vvv//znP//Xf/3Xe/5O3mxsbOyqq67a/uC6v//7vz/jjDMmH7/vfe878cQT6+vrJ7+89dZbb7311t3eeWryX/ziF1u3bv34xz/+9a9/fWev32effe66665zzjlncqR9+fLlp59++oEHHjj1lL4oiq644ordbjQAAKXJFHoACOw973nP3Llzp3OQ27vf/e4nn3wyN1U+lUpt3rx53bp1U9v7xz/+8R/84Ae5Ly+44ILc446OjieffHLr1q35Sn7OOefscNH+eeed9+Mf/zj35Vve8pY77rhjh3c499xzTz/99B0+dfbZZ+f2pRsfH3/mmWd2e7D82Weffffdd0/doG6b9n7ppZf+6Ec/2vVNAKBkKfAAUGyzZs069NBDTzjhhL/927/9xS9+sWnTpjPPPHOa33vuuedu2LDhq1/96vbN+eSTT77zzjtffvnlqQeYX3jhhT/96U+nnqCWR2ecccaiRYs+/OEP564cdthh119//YsvvrjNmeoXXXTRK6+8MnUu/aGHHnrdddc9//zz+++//w5v/o53vOPJJ5887bTTZhTpyiuvXLdu3Wc+85kDDzxw6vVTTz312Wefffjhhw8//PAZ3RAASses3IYuAEC89Pf3t7S09Pb2HnPMMccdd9xBBx20ixcPDQ0NDg4edthhhxxyyPabw++lhoaG5ubmQw455AMf+MDOCvmknp6eTZs2HXjgge9///unftCwC+Pj493d3QcddNBhhx02/fPqx8fHa2tre3t7DzrooOOPP/6YY46Z5jcCQMlS4AEAACAGTKEHAACAGFDgAQAAIAYUeAAAAIgBBR4AAABiQIEHAACAGFDgAQAAIAYUeAAAAIgBBR4AAABiQIEHAACAGFDgAQAAIAYUeAAAAIiBfUMHiKVZs2aFjgAAAEDJyWazhbu5EXgAAACIASPwe66gn6yQFwMDAwMDA4cffvjhhx8eOgsAu9LZ2Tk2NnbMMccccMABobMAsCtNTU3ZbPa4444zMXkbRfgFMQIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8AAAABADCjwAAADEgAIPAAAAMaDAAwAAQAwo8ADAjCXTme6RZOgUAFBZ9g0dAACIh7FUZkPH0KrWwSWNfStbBpPpzLGHH/Bn73nrR4874uMnvO3g/fYJHRAAypwCDwDsxuBY6uZXa1+p7U5lslOvtwyMzVndNmd12377zj7z3Yd//bw/OfbwA0KFBICyZwo9ALAry5r6L/vN8t9v7NqmvU+VTGUWNfT99wffWNLYV8xsAFBRFHgAYMdSmewvFm/90hNrOoentdx9cCx19Zw1v17WVOhgAFCZTKEHAHagvjdxw/MbNnQOz+i7Mtnop/Pra7tHbjj/ffvva5wAAPJJgQcAtrW6dfBLc9aMTqT37Ntf2NDZ3D9668UfPPKg/fIbDAAqmY/GAYA3qetJXPv02j1u75PWtg/9r6fXTaQz+UoFACjwAMAfdQyNX/3kmsGx1N7fam370F0LG/b+PgDAJAUeAPiDvsTEF59Y3TE0nq8b3l/dPHdzd77uBgAVToEHAKIoikaS6aufXLO1bzSP98xmo+++uKl1cCyP9wSAiqXAAwDRRDrzL8+un+me89MxNJ76+nPrkymL4QFgbynwAEB00yublzT2Fejm6zuG75hfX6CbA0DlUOABoNK9tKnr2XXtBf0RD69oedVieADYOwo8AFS0xr7R7724qQg/6MaXagfysbk9AFQsBR4AKlcqk/3mCxsSe3fk+zQNjk38YlFDEX4QAJQrBR4AKtcdVfVr24eK9uMeX91W15Mo2o8DgDKjwANAhVrY0PfQiuZi/sR0JvtTu9kBwJ5S4AGgEnUOj3/zhQ3ZbLF/blVdz+KthdruHgDKmwIPABUnk81e//yG/tGJID/99qq6TNE/OACAMqDAA0DFeWRl6xvNA6F+em3XyDMFPrUOAMqSAg8AlaV1YOxnCxvCZrhrQcNIshhb3wNAOVHgAaCCZLPR91+uTYQuz72J5K+XNYXNAACxo8ADQAV5am3bksaS2EPugTea24fGQ6cAgDhR4AGgUnQNj99RVSqnuI2nMr9a2hg6BQDEiQIPAJXiB69sHhpPhU7xR8+u7+geSYZOAQCxocADQEX4bU3HvLqe0CneJJnKPLSiJXQKAIgNBR4Ayl9PInnra1tCp9iBx1e1ltSkAAAoZQo8AJS/W+ZuGRwrxZ48kkw/tqo1dAoAiAcFHgDK3MKGvpc3dYVOsVMPr2gdT2VCpwCAGFDgAaCcjaUyP3ylNnSKXelNJJ9e2x46BQDEgAIPAOXsrgX1rYNjoVPsxr8vb0plsqFTAECpU+ABoGxt6hp+ZGUMVpi3D42/Vt8fOgUAlDoFHgDKUyYbfe+l2nRMRrYfXtOdycYjKgCEosADQHl6aEVzTcdQ6BTT1TgwvqxlJHQKAChpCjwAlKG2wbG7F20NnWJmHq/pCx0BAEqaAg8AZejmVzcnkunQKWampnu0ptMgPADslAIPAOXmxY1d8+t7Q6fYE4+tbgsdAQBKlwIPAGVlcCz1o9c2h06xh17Z3NubmAidAgBKlAIPAGXlJ/Pq4tuBJ9KZJ9cYhAeAHVPgAaB8VDcPPLu+PXSKvfL46rZUTI6+A4AiU+ABoEyMpzI3vrQp7oepdw2Pz93cHToFAJQiBR4AysTdixqa+kdDp8iDR1a2ho4AAKVIgQeAclDbNfLgGy2hU+THypaB9R1DoVMAQMlR4AEg9jLZ6LsvbSqnpeOPr7KVHQBsS4EHgNh7oLq5pryGrH+/sbN/NK576QNAgS
jwABBvW/tGf76oIXSKPBtPZZ5aG+/t9AEg7xR4AIixTDb7vZc2jacyoYPk3+OrWtNltCgAAPaeAg8AMXZ/dcvKloHQKQqifWj89S09oVMAQAlR4AEgrhp6Ez9f2BA6RQE9vtpWdgDwRwo8AMRSOpP91u83JtNlOHk+Z1lT39a+cjjZHgDyQoEHgFj69fKmde1ltfP89rLZ6InVraFTAECpUOABIH7qexP3Lm4MnaIYnl3XMVaOW/QBwB5Q4AEgZlKZ7LdeKPPJ8zlD46kXN3aGTgEAJUGBB4CY+cWires7ynzy/FSPrzKLHgCiSIEHgHhZ0TJw37KKmDyfs75juKI+sACAnVHgASA2hsZT33xhYyYbOkfRPeE8OQBQ4AEgRn7wSm3b4FjoFAG8sKFzcCwVOgUABKbAA0A8PL22/cWNXaFThDGeyvy2piN0CgAITIEHgBho7h+99bUtoVOE9MTqtmzlrR0AgKkUeAAodalM9vrnNyQm0qGDhNTQm6hu7g+dAgBCUuABoNTduaB+bbtt2KPHbWUHQGVT4AGgpFXV9dxf3Rw6RUmYu7m7c3g8dAoACEaBB4DS1TY49u3fb7T2e1I6k31qbXvoFAAQjAIPACUqmcr8y3M1A45Pm2LO6rZUxucZAFQoBR4AStStr2+p6bD0/U26R5JzN3eHTgEAYSjwAFCKXtzY+YQ923bksVWtoSMAQBgKPACUnMb+0e+/XBs6RYl6o3mgtmskdAoACECBB4DSMjqRvu6ZdSPJij71fdcMwgNQmRR4ACgt33uptq4nETpFSftdTcfg2EToFABQbAo8AJSQ+6ubX9zYGTpFqRtLZX5X41cJgIqjwANAqVjVOvhv8+tDp4iHR1e1ZrLOkwOgsijwAFASukeS//rceoecT1Nj3+iypv7QKQCgqBR4AAgvlcn+63Pru0eSoYPEyaMrbWUHQGVR4AEgvP87d/Oq1sHQKWKmqq6ndWAsdAoAKB4FHgACe2Zdx5zVbaFTxE8mGz2+2iA8ABVEgQeAkDZ1Dd/8am3oFHH1xOq2kWQ6dAoAKBIFHgCCGRyb+Nqz68dTmdBB4mokmX5mXXvoFABQJAo8AISRyWZveGFji1Xce+ehFS1pW/cDUBkUeAAI497FjQvqe0OniL3WgbG5m7tDpwCAYlDgASCApY399y7ZGjpFmfj35U2hIwBAMSjwAFBs7UPj3/hdjXnf+bK+Y9ghfABUAgUeAIoqmcp87dn1/aMToYOUlfurm0NHAICCU+ABoKjumF9f0zEUOkW5eX1LT1P/aOgUAFBYCjwAFM/Chr5HVraETlGGMtnsQyv8wgJQ5hR4ACiS3sTEt3+/IWvpe2E8s7Z9YCwVOgUAFJACDwDFkMlmr3++pjdh6XuhjKUyT6xuDZ0CAApIgQeAYrhvWdPSxv7QKcrcIytax1OZ0CkAoFAUeAAouPUdw79Y5NT3gutJJA3CA1DGFHgAKKzh8dTXn1ufcux7Ufx6WfOYQXgAypQCDwCFdfPcza2DY6FTVIqeRPKxlQbhAShPCjwAFND8+t7nazpDp6gsv17elJhIh04BAPmnwANAoYwk0ze9vCl0iorTPzrxqEF4AMqRAg8AhXLb61s6h5OhU1Si3yxvGkkahAeg3CjwAFAQy5v6n17XHjpFhRoYSz28oiV0CgDIMwUeAPJvLJW58eXarI3nw7m/unlwbCJ0CgDIJwUeAPLvzvn1zf2joVNUtKHx1MMrrIQHoKwo8ACQZ2vbhx5Zaf52eA+uaBkcS4VOAQB5o8ADQD5NpDPffXFTxuT5EjA8nrpvWWPoFACQNwo8AOTTQyta6npGQqfgDx5a0VrfmwidAgDyQ4EHgLzpS0z8cmlT6BT80UQ6c8vcLaFTAEB+KPAAkDc/W9gwPG7RdWlZ2tj30qau0CkAIA8UeADIj7qeEQe/l6bbXt+SSKZDpwCAvaXAA0B+/Pj1urTN60pS53Dy3iVbQ6cAgL2lwANAHry+pWfx1r7QKdipB95o2dxtc0EA4k2BB4C9NZHO3D6vLnQKdiWdyf7fuZuzZkgAEGcKPADsrYdXtDb2j4ZOwW680TxgNzsAYk2BB4C90peY+OXSxtApmJbb5tUNjjkmAIC4UuABYK/cs3jrkKPjYqJrePw7L240kR6AmFLgAWDPdY8kHR0XL69v6Xl4ZUvoFACwJxR4ANhzv1zSOJ7KhE7BzNw+r25N22DoFAAwYwo8AOwhw+8xlcpkr39+w7CFDwDEjQIPAHvoV0sNv8dVy8DY917aFDoFAMyMAg8Ae6InkXxqreH3GHultvvxVa2hUwDADCjwALAn7lvaZPg97n78et2mrpHQKQBguhR4AJixnkRyzpq20CnYW8l05tqn1rYOjoUOAgDTosADwIz9elmz4ffy0Dk8/o+PrmofGg8dBAB2T4EHgJnpSSTnrLZ2uny0D41/8fHVvYmJ0EEAYDcUeACYmV8vaxoz/F5eGvtHvzRn9eCYg+UAKGkKPADMQF9iYs5qq9/LUG3XyLVPrU0k06GDAMBOKfAAMANz1rQZfi9Xq9sGr3t2vd9fAEqWAg8A05XKZJ8w/F7Wljb2/Y+HV7bZlx6AkqTAA8B0vbixq3PYduVlblPX8D88tGJFy0DoIACwLQUeAKbr4RXNoSNQDH2JiS/NWfPbmo7QQQDgTRR4AJiWFS0D6zuGQ6egSJKpzLde2Pij17ZkstnQWQDgDxR4AJiWh1e0hI5AsT28ouV/PbO+zxHxAJQGBR4Adq9tcOy1LT2hUxBAVV3Ppb+pnud3H4ASoMADwO49srI1nTGVukL1JpL/69l1N71SOzrhlHgAQlLgAWA3xlKZZ9a1h05BSNlsNGd1239/cMWGTvsgABCMAg8Au/HM2vbBsVToFITX0Jv4/MMr71vWZDYGAEEo8ACwK9ls9Oiq1tApKBUT6cy/za+/4pGVTf2jobMAUHEUeADYlfn1PQ29idApKC1r2gb/3/vfmLO6LXQQACqLAg8Au/KY4Xd2JDGRvumV2q89u75/1CFzABSJAg8AO9UxNL54a1/oFJSuuZu7L7//jXXtQ6GDAFARFHgA2Kmn17bbroxd6xwev/KxVc4pAKAIFHgA2LFMNvvs+o7QKYiBZCrz3Rc33fRKbcrnPQAUkgIPADu2ZGt/2+BY6BTExpzVbV98YnVfwpJ4AApFgQeAHXvapGhm6I3mgc87YQ6AglHgAWAHBsZS8+p6Qqcgfpr7R7/w+OrWAXM3AMg/BR4AduDZde3JVCZ0CmKpY2j8S3PWdA2Phw4CQLlR4AFgB55ZZ/s69lxT/+gXHl/daz08AHmlwAPAtla1Dtb1jIROQbxt7Rv98pw1g2Op0EEAKB8KPABs6+m1tq8jDzZ1DV/z5JrERDp0EADKhAIPAG+SmEi/tKkrdArKxNr2oa8/V5PJOh8egDxQ4AHgTV6o6Rw1ZEr+LGzofaC6JXQKAMqBAg8Ab+L4d/LuzgX1a9uHQqcAIPYUeAD4o7qexDpFi3xLZbI3PL/BYngA9pICDwB/9Lsap8dREE39oze/sjl0CgDiT
YEHgD/IZqMXbV9Hwfy2psMnRADsDQUeAP5gZetA68BY6BSUsx++urmxfzR0CgDiSoEHgD94vqYzdATKXCKZvuH5DamMU+UA2BMKPABEURRNpDOv1Jo/T8Gtax96fFVr6BQAxJICDwBRFEULG/oGxlKhU1ARfrG4cdAfNgBmToEHgCiKouc3mD9PkQyOTfxyaWPoFADEjwIPANFIMl1V1xM6BRXkkZWtTXazA2CGFHgAiF6t7RpPZUKnoIJMpDN3LmgInQKAmFHgAcD8eQJ4eVPXypaB0CkAiBMFHoBK1z2SXN7UHzoFlegn8+qyTpQDYNoUeAAq3e83djqWmyDWtg+9tMnsDwCmSwFKWhMAACAASURBVIEHoNI9X6NBEcy/LWhIpu2/AMC0KPAAVLSG3sSGzuHQKahcrQNjj6xoDZ0CgHhQ4AGoaC/Yvo7Q7q9uTjoEAYBpUOABqGgv13aHjkCl60kkX9jogyQAdk+BB6BybeoabuhNhE4B0QPVzbajB2C3FHgAKtfLmwy/UxK29CSWNPaFTgFAqVPgAahcr25W4CkVD77REjoCAKVOgQegQpk/T0lZ2NBb2zUSOgUAJU2BB6BCmT9PqXl4pUF4AHZFgQegQpk/T6l5fkNnTyIZOgUApUuBB6AS1XaNmD9PqUmmMk+sbgudAoDSpcADUIleru0KHQF24LFVreOpTOgUAJQoBR6ASvRqrfnzlKK+xMTvajpCpwCgRCnwAFSczd0j9ebPU6rur27OZLOhUwBQihR4ACrOK4bfKWFb+0armwdCpwCgFCnwAFSclzdZAE9J++16s+gB2AEFHoDKYv48pe+V2u5EMh06BQAlR4EHoLIYfqf0jU6k526x0AOAbSnwAFQWC+CJhd+u7wwdAYCSo8ADUEHqesyfJx6WN/W3D42HTgFAadm3OD8mkUg8/fTT69atSyQSxx9//EUXXXTSSSfN9CaZTOaVV15ZvHhxT0/PMcccc+6555511lm7eH1nZ+fixYs3bdrU1tZ20kkn/emf/unHPvaxt7zlLXvxPgCIt7mbe0JHgGnJZLO/q+n4Hx89PnQQAEpIMQr8U089dcUVV/T29uau/PM///OVV155xx13HHDAAdO8yfr16y+77LI1a9ZMvXj22Wc/+OCDxx+/7d9t6XT6jjvu+Na3vjU0NDT1+gc+8IE777zzE5/4xB69DwBib+5m8+eJjefWd3z+I8fPmhU6BwAlo+BT6H//+99/5jOf6e3tnT179kc/+tFPfepThx12WBRF99xzz+c+97lp3qS5ufm8886bbO8nnXTSJZdccuyxx0ZRtGDBgvPPP7+/v3+b11977bVf/epXh4aG3vrWt1544YVXXHHFxz/+8VmzZtXU1Jx33nmPPfZYXt8iAPHQNji2sWs4dAqYrsa+0bXtg6FTAFBCClvgx8fHr7jiinQ6fcQRR6xdu3bJkiVPP/10d3f3RRddFEXRo48++vTTT0/nPtdee21HR0cURQ888EBtbe2jjz7a3Nz8gx/8IIqiTZs2fec735n64hUrVvzsZz+Lougv/uIvNm7c+Nvf/vbee+9duHDhwoULjzvuuCiK/umf/qmlpSXvbxaAEvfalp5sNnQImInf1tjKDoA/KmyBf+CBByar8gMPPPCBD3xg8uJb3vKWhx566MQTT4yi6JZbbtntTTZv3jxnzpwoir72ta9dfvnluetf//rXL7nkkiiK7r777sHBP34+fdNNN6XT6QMPPPC+++47+uijc9c/9rGP3X///VEU9fX1PfLII3l5gwDEiPnzxM6LG7uSqUzoFACUisIW+KeeeiqKove///0XXnjh1OuHHHLIZPdetGhRV9duzuN95plnstnsrFmz/vmf/3mbpz7/+c9HUTQ6OvrSSy/lLr7++utRFF100UXvfve7t3n9f/kv/2VywXx1dfWevSMAYqp/dGJVq9nIxMzg2ERVfe/uXwdAZShsga+qqoqi6C/+4i+2f+pTn/pUFEWZTGb+/Pm7vsm8efOiKDrllFPe+c53bvPUeeedd8ghh+ReE0XR6Ojo5CcCp5566g7vNjmLvq2tbSbvA4DYe31LTzpjAj3x89uajtARACgVBdyFvqOjY3J7uT/90z/d/tmPfOQjs2bNymazmzZt2vV9Nm7cuLOb7L///h/60IcWLlyYu8ns2bOfffbZKIpOO+207V8/Pj5eU1MTRdEeHGIHQKyZP09MLajv7Ukkjzxov9BBAAivgAW+qalp8sH2U9mjKNpvv/2OPvrozs7OxsbG6dxnhzfJXc/dZP/995/cIW+HfvSjH02eZvd3f/d3u38DAJSLRDK9tHHbI0sgFtKZ7O83dF5+xo7/IQRARSlggR8e/sNRPZOz3Ld3yCGHdHZ2bnNU+zay2Wwikdj1TaIo2vVNJu9z++2333DDDVEUfeITn/jkJz+569fvv//+u35BNOUTCkrW0NDQ4ODg8PDw1G0OgQo0v3FoeHQsdAp2JZVKZbPZyV1vQmcpOXPeaDjnaAtAgFLR2tqazWajKPJ/7OIrYIEfHR2dfLCzMjx5fbKf78zY2NjkH469ucnKlSuvvfbayc3tTj311CeeeGK34ZPJ5G5fk3UYUcmb/D2a/Bdh6CxASAuadvM5L5Sy2t6xrpGJow4q4D/bAKYv92/s0EEqUQH/JjjggAMmH+ysDI+Pj0dRtN9+u1rTlevte3aTjo6Ob3zjG/fdd18mk4mi6POf//xtt912+OGH7zb82NiuBmom39rkfniUsoGBgYGBgcMPP3w6v+lAuUplsut6m3f91w3BTe6Ms++++86eXdgddmNq0+j+Hz75XaFTAPxBNps97rjjjMAXXwEL/MEHHzz5YGRkZIcv2PXc+EmzZ88+8MADR0dH9+Am995779e+9rXJjfQ+/OEP33LLLTvcD3+HpjOF3p/X0jdritBZgGCWNfUPjadCp2A3Jgt86BSl67UtPZd9+NjQKQCi6D96kH9jB1HAD7mPPfYPf820trZu/2wqlers7Iyi6F3v2s3HyZP32eFNoihqaWnZ/iaZTOYf/uEfrrzyyv7+/uOPP/6BBx6orq6efnsHoJzYf54y8EZzf19iInQKAAIrbIGfHBifPAduG5s3b56c1v6BD3xg1/c5+eSTd3aTKIpqa2u3v8mXvvSl3/zmN1EUXXXVVevWrbv88st9OARQmTLZ7LwtPaFTwN7KZKOq+t7QKQAIrLDLzM4666woiqqqqrZ/Kndx8jW7cPbZZ0dRtGrVqu23mq+trW1vb8+9ZtKDDz7485//PIqi22+//a677tr1FH0Aytvq1sGexO73JYXSZy4JAIUt8BdffHEURUuWLKmpqdnmqV//+tdRFP2n//SfdrsV3ORNJiYm7r///m2euu+++6Io2m+//S688MLcxZ/85CdRFH3605++5ppr9jI/AHH3muF3ysWSrX2JiXToFACEVNgC/7nPfe7oo4+OougLX/jC1H3df/7zny9YsCCKoq997WtTX/+rX/3qsssuu+yyyxYtWpS7eMopp1xwwQVRFH3729/eunVr7vqK
FStuu+22KIquuOKKt73tbZMX169fv2zZsiiK/vN//s9rd665ublw7xqA0jGvToGnTCTTmfn+PANUtoLv+Proo49eeumlURSddNJJn/70p48++uiXX3755Zdfzmazf/3Xf/30009PXZ3+5S9/+c4774yi6KGHHrrsssty12traz/60Y/29/cfeeSRF1988WmnnbZw4cLnnntuZGTkhBNOWLZs2VFHHTX5yieeeOIzn/nMblNdeumlDz/88B6/qcnMNsstfY6RgwpX15P47L8vD52CaUmlUplMxjFyu/ZfTz7mpgvfHzoFUOmampocI7dDReiJBTxGbtJnP/vZoaGha665ZvPmzT/60Y9y1y+99NJ77713mr/lf/Inf/L8889fdtllW7du/eUvf5m7fvrppz/66KO59h5F0ZYtW/IYHoBYe22LNcOUlfn1Pcl0Zr99fMYBUKGKdOZqR0fHI488sm7dutHR0eOPP/7iiy/+yEc+sv3LamtrJ4+FO+WUU4455phtnh0dHX3qqacWLlzY29v79re//ROf+MQFF1yw775v+gxiy5YtTU1Nu81zzDHHnHLKKXv8dozAx4UReKhw/99DK9a2b7sBKqXJCPw03XbxB8/5f44MnQKoaEbgd6YIPbFIBb7MKPBxocBDJetNTPzlLxZn/L86JhT4afrUB9/+zf96cugUQEVT4HemCD3R35EAlKfXt3Rr75Sf17f0pDP+YANUKAUegPI0r643dATIv4Gx1IqWgdApAAhDgQegDI2lMssa+0KngIKYu9nujAAVSoEHoAwtaugdS2VCp4CCmLu5x+oQgMqkwANQhqrMn6d8dQ6P13Q6XgGgEinwAJSbTDaqqu8JnQIKaGGDFSIAlUiBB6DcrG4d6EtMhE4BBbSw3hwTgEqkwANQbl6vM/xOmVvbPjgwlgqdAoBiU+ABKDevb1bgKXOZbLRkq0F4gIqjwANQVhp6E439o6FTQMFZBg9QgRR4AMrK61sMv1MRFjX0OUwOoNIo8ACUFQWeCtGTSG7sGg6dAoCiUuABKB+9ieTa9sHQKaBIFjZYBg9QWRR4AMrH/PrejEnFVAzL4AEqjQIPQPmoqjMgSQVZ3Tow6DA5gEqiwANQJpKpzJKtBiSpIJlstLTRn3mACqLAA1AmljcPJCbSoVNAUZlFD1BRFHgAysS8OvvPU3EW1Pc6TA6gcijwAJSJBfUWwFNxehLJ2u6R0CkAKBIFHoBysKlrpG1wLHQKCGBBvbknAJVCgQegHLy+pTt0BAhjkb0bASqGAg9AOXCAHBVrVevg0LjD5AAqggIPQOx1jyRrOodCp4Aw0pnssqb+0CkAKAYFHoDYm28jbiqbHRwBKoQCD0DsOUCOCrfEMniAyqDAAxBv46nMUu2FytY+NN7UPxo6BQAFp8ADEG9LtvaNpTKhU0BglsEDVAIFHoB4q7L6F6JoaaMCD1D+FHgAYiybtX0XRFEULWvsy9jLEaDcKfAAxFhN51Dn8HjoFBDewFiqtmskdAoACkuBByDGquoMv8MfmEUPUPYUeABirMoBcvAfljY5jgGgzCnwAMRV1/D4xq7h0CmgVKxoHkg6kQGgrCnwAMTVvLpem3ZBzlgqs7Z9KHQKAApIgQcgrsyfh204DR6gvCnwAMTSeCqzXFeBN1uy1TJ4gHKmwAMQS0sa+8Ys94U3W9cxNJJMh04BQKEo8ADE0rwt5s/DttKZ7IqWgdApACgUBR6A+Mlmo4UNpgrDDixt9J8GQNlS4AGIn5rOoc7h8dApoBQtbbQ3BEDZUuABiJ+qut7QEaBEbekZ6UkkQ6cAoCAUeADixwFysDPZbFTdZBk8QHlS4AGIma7h8Y1dw6FTQOmyDB6gXCnwAMRMVX1vNhs6BJSwJZbBA5QpBR6AmLEAHnatbXCsuX80dAoA8k+BByBOxlOZZaYHw+5UN1sGD1CGFHgA4mTJ1r6xVCZ0Cih11c1m0QOUIQUegDipqjd/HnZveZMCD1CGFHgAYiObjRYo8DANncNJy+AByo8CD0BsbOgc6hweD50C4sEyeIDyo8ADEBvmz8P0WQYPUH4UeABiwwFyMH2WwQOUHwUegHjoGh7f0DkUOgXEhmXwAOVHgQcgHqrqerPZ0CEgViyDBygzCjwA8TC/wfx5mBnL4AHKjAIPQAyMpzJLt/aFTgExYxk8QJlR4AGIgaWNfWOpTOgUEDOWwQOUGQUegBiw/zzsGcvgAcqJAg9Aqctmo/n1PaFTQCxZBg9QThR4AErdxq7hzuFk6BQQS5bBA5QTBR6AUldVZ/gd9lDncLJlYCx0CgDyQ4EHoNRZAA97wzJ4gLKhwANQ0rpHkjWdQ6FTQIxVm0UPUC4UeABK2vz63mw2dAiIs+X2sQMoFwo8ACVt3hYL4GGvdAyNWwYPUB4UeABKVzKdWWb2L+w1y+AByoMCD0DpWt40MDqRDp0CYm95U1/oCADkgQIPQOma5wA5yIc3jMADlAUFHoDStaDeAXKQB+1D422DlsEDxJ4CD0CJ2tQ1onJAvrzRYhAeIPYUeABKlPnzkEcrzKIHiD8FHoASVaXAQ/4YgQcoAwo8AKWoN5Gs6RgOnQLKR2PfaNfweOgUAOwVBR6AUlRV15vJZkOngLKywiA8QMwp8ACUIvPnIe9WtAyGjgDAXlHgASg5yVRmaWN/6BRQbqqb/WcFEG8KPAAlZ3nzQGIiHToFlJv63kRvIhk6BQB7ToEHoOSYPw+FkM1Gq1rNogeIMQUegJKzoKE3dAQoT5bBA8SaAg9AadnUNdw6MBY6BZSnNyyDB4gzBR6A0lJVZ/gdCmVT1/DQeCp0CgD2kAIPQGmxAB4KJ2MZPECcKfAAlJDeRHJ9x3DoFFDOzKIHiC8FHoASMr++L5PNhk4B5cw+dgDxpcADUELMn4dCW98xlJhIh04BwJ5Q4AEoFclUZsnWvtApoMylM9k1bQbhAWJJgQegVFQ3DxgYhCJ4o3kgdAQA9oQCD0CpqKo3fx6KYUWLAg8QSwo8AKVifr0T4KEY1rYPJdOZ0CkAmDEFHoCSsLl7pHVgLHQKqAjJVGZt21DoFADMmAIPQEmYZ/95KKI3zKIHiCEFHoCSUFVn/jwUzwr72AHEkAIPQHh9iYl17Sb0QvGsah2YsAweIG4UeADCm1/fk8lmQ6eACjKWymzoHA6dAoCZUeABCK/K/vNQdCtaBkNHAGBmFHgAAptIZxY39IVOARXnjeb+0BEAmBkFHoDAqpsHEhPp0Cmg4qxsHcxYuQIQKwo8AIHN2+IAOQhgeDy1qcsyeIA4UeABCGy+BfAQiFn0APGiwAMQ0paeROvgWOgUUKHsYwcQLwo8ACHN29IdOgJUrurmfic4AsSIAg9ASFV15s9DMINjqYbe0dApAJguBR6AYPoSE2vbzeCFkKotgweIDwUegGAWNPQ6xQrCWtEyEDoCANOlwAMQjP3nIbg3mhV4gNhQ4AEII5XJLt7aFzoFVLrukWRjn2XwAPGgwAMQxhvNA8PjqdApALPoAWJDgQcgjKq6ntARgCgyix4gPhR4AMKwAB5KxIp
WBR4gHhR4AAKo6xlp6rfsFkpC68BY2+BY6BQA7J4CD0AA8+ttXwclZEXLYOgIAOyeAg9AABbAQ0l5o7k/dAQAdk+BB6DYBsdSq9sM90EJecNG9ABxoMADUGzz63vSmWzoFMAfNfaNdg6Ph04BwG4o8AAUmwXwUIIcJgdQ+hR4AIoqnckuanCAHJQcs+gBSp8CD0BRrWgZGBpPhU4BbGt5o33sAEqdAg9AUVXVGX6HUtTYbxk8QKlT4AEoKgfIQcmyDB6gxCnwABRPU/9oY/9o6BTAjlkGD1DiFHgAiuf1LYbfoXRVN1kGD1DSFHgAisf8eShlW/tGuyyDByhhCjwARTI8nlrVOhg6BbArb7T4jxSgdCnwABTJwobeVCYbOgWwK9XNZtEDlC4FHoAimV/fFzoCsBuWwQOUMgUegGLIZKOFDU6Ah1JnGTxAKVPgASiG1a0D/aMToVMAu7fCMniAUqXAA1AMVfWG3yEeLIMHKFkKPADF4AA5iIvq5oHQEQDYMQUegIJrGxyr60mETgFMS0NvwjJ4gNKkwANQcPPqzJ+HOLEMHqA0KfAAFNz8evPnIU4sgwcoTQo8AIU1OpGubrKkFuLEMniA0qTAA1BYi7f2JdOZ0CmAGbAMHqA0KfAAFNYCB8hBDFkGD1CCFHgACiibjRY09IVOAczYsibL4AFKjgIPQAHVdA6biAtxtLTRR28AJUeBB6CA7D8PMdUyMNY6MBY6BQBvosADUEBVToCH2FpuL3qAEqPAA1Ao3SPJDZ1DoVMAe8gseoBSo8ADUChVdT3ZbOgQwJ5a2tjnP2GAkqLAA1AoDpCDWOtNTNT3JkKnAOCPFHgACiKZyix1DBXEnMPkAEqKAg9AQSxv7k8k06FTAHtlmWXwAKVEgQegIObbfx7ir7p5IGMZPEDJUOABKIiFWw3cQewNjac2dA6HTgHAHyjwAORfXU+iuX80dAogD8yiBygdCjwA+Tff/vNQLuxjB1A6FHgA8m9+fU/oCEB+rGwZSKYyoVMAEEUKPAB5Nzg2sap1MHQKID/GUpm17UOhUwAQRQo8AHm3aGt/2r7VUEbMogcoEQo8AHlm/jyUmaX2sQMoDQo8APmUyUaLGvxbH8rK2vahxEQ6dAoAFHgA8mpN22D/6EToFEA+pTPZlS02tgAIT4EHIJ8cIAdlySx6gFKgwAOQTxbAQ1lS4AFKgQIPQN50DI1v7h4JnQLIv9ruke6RZOgUAJVOgQcgb6rqerLOj4NylM0ahAcIT4EHIG8sgIcytnirAg8QmAIPQH6MpzLLm/pDpwAKZfHWPlNsAMJS4AHIj2VN/WOpTOgUQKH0JiZqbXIBEJQCD0B+VNXZfx7KnFn0AGEp8ADkx8IG/7KHMreowT4XACEp8ADkwebukbbBsdApgMJa2TqYSKZDpwCoXAo8AHlg/3moBBPpzIqWgdApACqXAg9AHijwUCEWWQYPEI4CD8DeGhybWNM2GDoFUAz2sQMISIEHYG8tbOhLZxwPDRWhoTfRasMLgEAUeAD2lvnzUFGWGIQHCESBB2CvZLJOloLKYhY9QCgKPAB7ZU3b4MBYKnQKoHiWNvZbNQMQhAIPwF4xfx4qzdB4al37UOgUAJVIgQdgr8yv7wkdASg2s+gBglDgAdhzncPJzd0joVMAxabAAwShwAOw56rqerJWwkLlWds+OGjzC4CiU+AB2HMLLICHipTJRsua+kOnAKg4CjwAeyiZzixtNI0WKpQNLAGKT4EHYA8tbxoYS2VCpwDCmF/fk7GEBqC4FHgA9pD956GS9SUmHCYHUGQKPAB7aH6dCbRQ0ar8TwCguBR4APZEXc9I6+BY6BRASJbBAxSZAg/AnvAPd2BT13CbD/IAikiBB2BPKPBA5CxJgOJS4AGYseHx1OrWwdApgPB8lgdQTAo8ADO2aGtfKuP4KCBa1tTvOEmAolHgAZgx+88Dk8ZTmaVb+0KnAKgUCjwAM5PJZhdtVeCBPzCLHqBoFHgAZmZ9x3BvYiJ0CqBUVNX3Zi2pASgKBR6AmZlf1xM6AlBCuobHN3UNh04BUBEUeABmxnRZYBvzfK4HUBQKPAAz0D2S3GioDXgzn+sBFIcCD8AMLLDYFdhOTcdwTyIZOgVA+VPgAZgB42zA9jLZ7IJ6h8kBFJwCD8B0TaQzSxz4DOzI/HrL4AEKToEHYLpWtAwmJtKhUwClaGF971gqEzoFQJlT4AGYLvPngZ0ZS2UWm6EDUGAKPADTZYossAuv1naHjgBQ5hR4AKaluX+0sW80dAqgdM2r65lIm0UPUEAKPADTMq/O8DuwK8PjqWVNA6FTAJQzBR6AabEAHtitV2q7QkcAKGcKPAC7l0imV7YOhk4BlLrXNnenM9nQKQDKlgIPwO4t3tqXdEAUsDsDY6k3WsyiBygUBR6A3bP/PDBN9qIHKBwFHoDdyGajRVv7Q6cA4uHV2u5M1ix6gIJQ4AHYjZrOoa7h8dApgHjoSSTXtA2FTgFQnhR4AHZjfp3954EZsBc9QIEo8ADshgXwwIy8UtttEj1AISjwAOxKbyK5oXMkdAogTjqGxms6zaIHyD8FHoBdmV/faz8qYKbsRQ9QCAo8ALtiATywB17drMAD5J8CD8BOpTLZpU0OkANmrLFvdEtPInQKgHKjwAOwUytaBobHU6FTALH04sbO0BEAyo0CD8BOmT8P7LHf1XTaQAMgvxR4AHaqqs4BcsAeahscW9M+GDoFQFlR4AHYsZaBscb+0dApgBh7vsYseoB8UuAB2LF5ht+BvfP7jZ0T6UzoFADlQ4EHYMcsgAf20uBYavHWvtApAMqHAg/ADiSS6RWtA6FTALH3/Aaz6AHyRoEHYAcWb+1Lpkx8BfbW61t6nEYJkC8KPAA7ML/eAnggD8ZTmddtqAGQJwo8ANvKZLML6i1bBfLDXvQA+aLAA7Ctmo7hnkQydAqgTCxt7O8cHg+dAqAcKPAAbGt+vf3ngbzJZLMvb+oOnQKgHCjwAGyryoJVIK/sRQ+QFwo8AG/SNTy+sev/b+/O45wqD/2PP7MyAyODygCCjOKCVhFEr3qvXNuL1Vs31Lbu1p9a7a/otV5ttdiKVntdqrY/i1qK1w1UFkEEZJdFZR8YmI1hYPZ9SSbJZE9OknN+f0RHHGYnyZNz8nm/+kdIDiff6PSY7zzPeR6X7BQADKWszVltcctOAQC6R4EHAHzP9mqrpskOAcBwvjhilh0BAHSPAg8A+J6dtdwADyDy1pWZVH47CADHhwIPAPiOElT3NXTITgHAgJodvvwGu+wUAKBvFHgAwHf2NXR4lJDsFACMaUVJi+wIAKBvFHgAwHfYQA5A9HxVZbF6FNkpAEDHKPAAgO9wAzyA6AmE1LWH2E8OAAaPAg8A+EaF2d1s98lOAcDIVpS0sJIdAAwaBR4A8I3tNRbZEQAYXH2Ht6CJpewAYJAo8ACAb+yoZv48gKhbcZCl7ABgkCjwAAAhhLD7ggdbHbJTADC+LRXtHd6A7BQAoEsUeACAEELsrLGo3JgKIPqUoLqurE12CgDQJQo8AEAIIbYxfx
5ArHxW0spSdgAwCBR4AIAIqlpenU12CgCJotbqKW7hnh0AGDAKPABA7G/scPqDslMASCArSljKDgAGjAIPABDbqthADkBMbSo3O3wsZQcAA0OBBwCI7dwADyC2/EF1/WGz7BQAoDMUeABIdJXt7maHT3YKAAln4f5GNr8AgAGhwANAottWzfx5ABI0O3xcfwBgQCjwAJDomD8PQJZFBxplRwAAPaHAA0BCs3qU0lan7BQAEtSBRjuXIADoPwo8ACS0HTU2VeMmVADSLClskh0BAHSDAg8ACW1bVbvsCAAS2qYj5janX3YKANAHCjwAJC4lqO6t75CdAkBCC6rasqJm2SkAQB8o8ACQuPIb7Z5ASHYKAInus5JWj8K1CAD6RoEHgMTFBk4A4oHDF1hzqE12CgDQAQo8ACQoTeMGeADxYklhEwtqAkCfKPAAkKDKzS6TS5GdAgCEEKLe5t1ebZWdAgDiXWps3sbj8axataq0tNTj8eTm5t5www1nnXXWQE+iquqWLVv27NljsVhGjRr1H//xH5dffnl//uLKlSs3bNjwxz/+MTc3d+DZAcCYttfwXRlAHFl4oPFHZ54sOwUAxLVYFPiVK1c+8MADVut33xQff/zxJp8GmgAAIABJREFUX/3qV2+88UZGRkY/T3Lo0KE77rijpKTk6CenTZu2aNGiPmv5888/X1hY+OCDD1LgAaAT8+cBxJUDjfaiZseUscNlBwGA+BX1KfQbN2685ZZbrFZrcnLypZdeeuONNw4fPlwI8c4779x77739PEljY+OVV14Zbu9nnXXWrbfeOm7cOCHEzp07r7766o6O3vZA2rBhQ2Fh4XF/DgAwFLPLX2ZyyU4BAN/zv7vrZEcAgLgW3QLv9/sfeOCBUCg0YsSIgwcP5uXlrVq1qr29/YYbbhBCLF26dNWqVf05z2OPPdbW1iaEWLhwYUVFxdKlSxsbG19++WUhRHl5+fPPP9/t3/J4PG+//fZdd90VuQ8EAAbxdZWF5aIAxJu8eltBk112CgCIX9Et8AsXLmxqago/+MEPfhB+Mi0tbfHixRMmTBBCvPbaa32epLKy8rPPPhNCPPnkk0e38aeeeurWW28VQrz99tsOh+Pov/K3v/1t6tSpJ5544syZM202W+Q+EAAYxNdsIAcgLr27p152BACIX9Et8CtXrhRCnHvuudddd93Rz2dlZYW79+7du81mc+8n+fzzzzVNS0pKevzxx7u8dP/99wshvF7vpk2bjn5+3759hYWFisLqygDQDY8S2t/IGBeAeMQgPAD0IroFfvv27UKIH//4x8e+dOONNwohVFXdsWNH7yfZtm2bEOK888475ZRTurx05ZVXZmVldR7T6c0336z51uLFi4/jEwCAAe2stSpBVXYKAOjee3kMwgNA96JY4Nva2sLLy02aNOnYVy+55JKkpCQhRHl5ee/nOXLkSE8nGTJkyOTJk489SU5OzunfGjNmzGA/AQAY09dVzJ8HEL/21DEIDwDdi2KBb2hoCD849dRTj301PT09JydHCFFf38cvWcPn6fYknc/3eRIAQFhQ1XbVsjgIgLjGIDwAdCuK+8C7XN9sUBSe5X6srKwsk8nkdDp7OYmmaR6Pp/eTCCF6P8kghLep6114fT7EM6fT6XA4PB5P508jgMJWt8XpkZ0C6CoYDKqqqmlacnLU97hF/NteadpYUDFp1FDZQQB0o7W1NXy5Dk+pRixFscB7vd7wgyFDhnR7QPj5cD/vic/n0zTtOE8yCM3NzX0eEwqFIvumiLhQKKSqaigU4l8W0GlXvUNjBznEn84fS34+Ebao2Pw/07ufgAlArvBX61AoRIGPvSgW+IyMjPCDnlaD9/v9Qoj09PReTtLZ24/nJIPQ2NjYy6vhefv9GaWHXHa7fejQodnZ2dnZ2bKzAPGieFNLWlqa7BRAV0lJSaqqpqWl8XUQYQctAWvK8AvGnCA7CICuwhOmxo0bxxU79qJY4IcNGxZ+4Ha7uz2g97nxYcnJyZmZmV6v93hOMgj9KecpKSmRfVNEXMpRZGcB4kK52d3i8POfW8ShpKSk8E8mP5/o9MaO2vduu5CfCCDepKSkaJqWkpLCFTv2onibWWcH7nY6ejAYNJlMQoixY8f25zw9zWkP34je50kAAEKIryrbZUcAgP4qbnZs5aoFAEeJboEPD4yH94HrorKyUlVVIcQPfvCD3s9zzjnn9HQSIURFRUV/TgIAEEJ8Xc0GcgD0ZM62aiWoyk4BAPEiugu9Xn755UKI7du3H/tS55PhY3oxbdo0IURRUdGxS81XVFS0trZ2HgMA6IXJ5S83syMDAD1pdviWFLLvDwB8I7oF/qabbhJC5OXllZWVdXlpwYIFQoh/+Zd/GT9+fH9OEggEPv744y4vzZ8/XwiRnp5+3XXXRSgyABjWV1UWlvcGoDvv7qm3eLpfzBgAEk10C/y9996bk5MjhJg5c6bP5+t8ft68eTt37hRCPPnkk0cf/8EHH9xxxx133HHH7t27O58877zzrr32WiHEc889V1dX1/l8QUHB66+/LoR44IEHTjrppKh+EAAwAG6AB6BHnkDonT31slMAQFyI4ir0Qohhw4a99dZbt99++7Zt2y644IKbb745Jydn8+bNmzdvFkLMmDHj1ltvPfr4/fv3f/LJJ0KIm2+++d/+7d86n58zZ87u3btNJtPFF1980003TZkyZdeuXWvWrPF6vaeffvqf//znqH4KADAAlz9Y0OSQnQIABuOz4uZbJp9y1shhsoMAgGTRLfBCiNtuu83pdD766KOVlZV//etfO5+//fbb33333X5uPHD22WevX7/+jjvuqKure//99zufv/DCC5cuXTpy5MjI5wYAY9lVawuEWAgKgC6pmnhje80bP50kOwgASJakxeSGyLa2tk8++aS0tNTr9ebm5t50002XXHLJsYdVVFSEt4U777zzRo0a1eVVr9e7cuXKXbt2Wa3W0aNHT58+/dprr01N7eN3EB0dHYWFhUKIiy+++IQTTojIxwn/3iE2/+hwPOx2u91uz87Ozs7Olp0FkOwPa8s2lZtlpwB6FAwGVVVNTU1NTo7u/X3Qrzd+Ouny07lrEpCvoaFB07Tx48ezD3wXMeiJMSrwBkOB1wsKPBCmBNWr5u32BEKygwA9osCjT2ecPGzRLy5KTaYwAJJR4HsSg57IfyMBwPjy6m20dwB6V21xf5TfIDsFAMhEgQcA4/uS9ecBGMI7efU1Vo/sFAAgDQUeAAxO1cT2GqvsFAAQAUpQfXlLBXcxAkhYFHgAMLjCJrvNE5CdAgAi40CjfeXBFtkpAEAOCjwAGBzz5wEYzJztNSaXX3YKAJCAAg8ABvd1lUV2BACIJJc/+Ncvq2SnAAAJKPAAYGSHTa5mh092CgCIsK2V7UwvApCAKPAAYGR8wQVgVK9srXT4WOADQGKhwAOAkX1FgQdgUO1u5fVtNbJTAEBMUeABwLDqbd4qCxsmAzCs1aWt68tMslMAQOxQ4AHAsJg/D8DwXt5aUd/hlZ0CAGKEAg8AhkWBB2B4H
iX09LrDgZAqOwgAxAIFHgCMyezyl7Y5ZacAgKgra3PO3VUrOwUAxAIFHgCM6asqi6bJDgEAMfHx/sbt1RbZKQAg6ijwAGBMzJ8HkDg0Tfx5U3m7W5EdBACiiwIPAAbk8AUONNplpwCA2LF5As9uOKwy9QiAoVHgAcCAvq6yBFW+xQJILHvrO97aUSs7BQBEEQUeAAxoayX3ggJIRB/mN3xW3CI7BQBECwUeAIzGEwjl1dlkpwAAOV79sjK/oUN2CgCICgo8ABjN15UWhS2RASSqoKrNWnOoocMrOwgARB4FHgCMZivrzwNIbHZf8PFVpU5/UHYQAIgwCjwAGIovqO5h/jyAhFdr9fxxXVmI5TwBGAsFHgAMZWeN1RsIyU4BAPLtrrX99asq2SkAIJIo8ABgKFsqmD8PAN9YVtT8/76ulp0CACKGAg8AxqGE1J01bCAHAN9ZdKDxnT11slMAQGRQ4AHAOHbX2twK8+cB4Hve3l23YF+D7BQAEAEUeAAwji0VZtkRACAevbWzZllRs+wUAHC8KPAAYBBBVdtRw/rzANANTROvflm5oqRFdhAAOC4UeAAwiL31NocvIDsFAMQpTRMvb6lYebBVdhAAGDwKPAAYBOvPA0DvVE28sKn8zR01soMAwCBR4AHACFRNbKtm/XkA6NuCfQ2vflmpaprsIAAwYBR4ADCC/IYOm4f58wDQL0sLm59aW6aEVNlBAGBgKPAAYASsPw8AA7K1ov3xlaUett4EoCsUeADQPVUTX1Yyfx4ABiav3vZ/lxW1Ov2ygwBAf1HgAUD38hs6rB5FdgoA0J/DJtfdH+/fW88enAD0gQIPALq3mfnzADBYdl/wNysOLtjXIDsIAPSNAg8A+qZq4ivmzwPAcQip2ps7ap5eV+YLsqwdgLhGgQcAfWP+PABExMYj5l8tLWp2+GQHAYAeUeABQN82lTN/HgAio6zNeedH+9eVmWQHAYDuUeABQMdCqvZVVbvsFABgHG4l9OyGw0+tLXP4ArKzAEBXFHgA0LG99R02D18xASDCNpeb715YUNhklx0EAL6HAg8AOraF9ecBIDpaHL5ff1r8bl59SNVkZwGAb1DgAUCvgqr2ZSXz5wEgWkKqNm9X7S8WHThscsnOAgBCUOABQL/21tvsvqDsFABgcBVm932LC97cURMIsckcAMko8ACgV5tZfx4AYiKoagv2NdyzqKCszSk7C4CERoEHAF0KqtpXVRbZKQAggVS2u+9fUvj27jolyFA8ADko8ACgS3l1Ngfz5wEgtoKq9s6euts/2r+/kQXqAUhAgQcAXdpcwfJ1ACBHQ4d35qdFL22p8ARCsrMASCwUeADQn0BI/Zr58wAgj6aJz4pbbluQv7PGKjsLgARCgQcA/dlda3P4ArJTAECia3X6/3vlwafWlrEnCIDYoMADgP5sqmD9eQCIF5vLzbcu2LeVO5sARB8FHgB0Rgmp25g/DwDxxOoJ/H7NoafWlnV4mR4FIIoo8ACgM9urrW6FZZMAIO5sLjff9mH+tmp+xwogWijwAKAzG4+YZEcAAHTP6gn8dlXpsxsOe1mgHkAUUOABQE88gRArHgNAnFtXZrpnUcERk0t2EABGQ4EHAD35srLdH1RlpwAA9KHW6rn/k8IF+xpUTZOdBYBxUOABQE82HWH9eQDQByWovrmj5vFVpWz8CSBSKPAAoBsOXyCvvkN2CgDAAOyssd67uLDa4pEdBIARUOABQDc2l7cHQsyfBwCdaejw3rek4MtKNooHcLwo8ACgG5vKmT8PALrkUUK/X3PozR013BIP4HhQ4AFAHyweZX8j8+cBQK80TSzY1/DHdewwB2DwKPAAoA+bjphVhm0AQOc2l5t/vazI7gvKDgJAlyjwAKAPX7D+PAAYwqE216+WFppdftlBAOgPBR4AdKDN6S9pdchOAQCIjGqLZ+anxW1OOjyAgaHAA4AObDxiYtkjADCSOpv3waVF9Tav7CAA9IQCDwA6sOGwSXYEAECEtTh8Dy4tLDe7ZAcBoBsUeACIdzVWT7nZLTsFACDyrJ7Aw8tLaq0e2UEA6AMFHgDiHcPvAGBgHd7Aw8uLW7kfHkA/UOABIN5tKmf9eQAwMpNLeXTFQQd7ywHoCwUeAOJaSYuDJY4AwPCqLe5HV5R4AyHZQQDENQo8AMS1jWz/DgCJ4WCr8w9ry0Iqm44A6BEFHgDil6qJTRR4AEgYO2qsz39Rzr6hAHpCgQeA+LW33mbxKLJTAABiZ11Z2/t762WnABCnKPAAEL+YPw8ACejt3XW7am2yUwCIRxR4AIhTSkj9srJddgoAQKypmjZ7/eEmu092EABxhwIPAHFqe7XV5WdLIQBIRA5f4MnVh/xBVXYQAPGFAg8AcWrjEZPsCAAAacrNrhc3l8tOASC+UOABIB65ldDOGqvsFAAAmdaVmVaUtMhOASCOUOABIB5tLjczcxIA8NpXVYfaXLJTAIgXFHgAiEfMnwcACCGUoPrU2kOeQEh2EABxgQIPAHGn3a3kN3TITgEAiAvNdt/fvqqSnQJAXKDAA0Dc2XDYpGqyQwAA4saqg607WBgFAAUeAOLQ+sPMnwcAfM8Lm8rtPvYWBRIdBR4A4kuN1XPExHpFAIDvaXcrr2ytkJ0CgGQUeACIL+vKGH4HAHTjiyPmL1jiFEhsFHgAiCOaJjYyfx4A0INXtla2uxXZKQBIQ4EHgDhS0GRvdvhkpwAAxCm7L/jcxiMaC50CiYoCDwBxZANzIwEAvdpTZ2OtUyBhUeABIF4EQuqWcrPsFACAePf3bdUOX0B2CgASUOABIF5sr7ayRRAAoE9WjzJvd53sFAAkoMADQLxg/jwAoJ8+LWopbXXKTgEg1ijwABAXXP7gzhqr7BQAAH1QNe2VrZUqy9kBCYYCDwBxYVO52R9UZacAAOjGoTbnyoOtslMAiCkKPADEBZYUBgAM1Fs7am0eVrMDEggFHgDka3X6C5scslMAAHTG4Qu8uaNadgoAsUOBBwD5Nhw2cR8jAGAQVh9qK2rmV8BAoqDAA4B868qYPw8AGAxNE69sreC3wECCoMADgGRlbc5qi1t2CgCAXpWb3WsOtclOASAWKPAAIBnD7wCA4/TPXXXeQEh2CgBRR4EHAJlCqrbxCAUeAHBczC7/ogNNslMAiDoKPADItKvWamUHIADAcVuQ32DxKLJTAIguCjwAyMT8eQBARHiU0P/urpOdAkB0UeABQBqXP7it2iI7BQDAIFaUtFRbPLJTAIgiCjwASLOp3OwPqrJTAAAMQtXEWztrZKcAEEUUeACQhvnzAIDI2lZl2Vtvk50CQLRQ4AFAjma7r7DZLjsFAMBo/r6tWtU02SkARAUFHgDkWHfYxPcrAEDElZvdm8rbZacAEBUUeACQY31Zm+wIAABjent3bUjll8SAAVHgAUCC4mZHnc0rOwUAwJjqbd61LLMCGBEFHgAkWHeY71UAgCh6Z0+dwkYngOFQ4AEg1pSg+sURCjwAIIpaHL5Vpa2yUwCIMAo8AMTa9hqrwxeUnQIAYHDv5dX7GIQHjIUC
DwCxtuYQy9cBAKKu3a0sL26WnQJAJFHgASCmrB5ld61VdgoAQEL4YG+DJxCSnQJAxFDgASCm1h82BdnaBwAQEx3ewJKCJtkpAEQMBR4AYmrtIZavAwDEzkf5jQ5fQHYKAJFBgQeA2DlicpWbXbJTAAASiNMfXFzAnfCAQVDgASB2WL4OABB7iw40svsJYAwUeACIkaCqbWD7dwBAzLmV0GLuhAcMgQIPADGyo8Zq83AXIgBAgsUFTdwJDxgABR4AYmRNaavsCACABOXyBz8p5E54QPco8AAQC3ZfcGetTXYKAEDiWnSgyennTnhA3yjwABAL68raAiFVdgoAQOJy+oPLihiEB/SNAg8AscD68wAA6Rbub/QEQrJTABg8CjwARF2VxXPExPbvAADJ7L7gp0UtslMAGDwKPABE3ecHWb4OABAXPt7f6GUQHtAtCjwARFdI1TYcZvt3AEBcsHqU5cUMwgN6RYEHgOjaVm2xeBTZKQAA+MaH+Q2+IOuqArpEgQeA6FrN8nUAgHhi9QRWljAID+gSBR4AosjqCeyqscpOAQDA93yY36AwCA/oEAUeAKJodWlrUNVkpwAA4HtMLoX9TQE9osADQBTx9QgAEJ8W5DeE+BUzoDcUeACIlqJmR43VIzsFAADdaLL7Nh4xy04BYGAo8AAQLZ+Xsv07ACB+vb+3XtUYhAf0hAIPAFHhDYQ2MbIBAIhjtVbP1op22SkADAAFHgCiYlO52RMIyU4BAEBv3t/bwBg8oCMUeACIitWlLF8HAIh35WbXjhqL7BQA+osCDwCRV2/zFjbbZacAAKBv7+XVy44AoL8o8AAQeasPtTEjEQCgCwdbnXvrbbJTAOgXCjwARJiqsf07AEBPGIQH9IICDwARtqPaYnb5ZacAAKC/9jfaC5u48wvQAQo8AETYSrZ/BwDozft7G2RHANA3CjwARFK7W9lZY5WdAgCAgdlVaz3U5pKdAkAfKPAAEEkrD7aGVNavAwDoz/t5dbIjAOgDBR4AIkbVtM+ZPw8A0Kevqy0VZrfsFAB6Q4EHgIjJq+totvtkpwAAYDA0TXywj+XogbhGgQeAiFlR0iI7AgAAg7e53Fxn88pOAaBHFHgAiAyrJ7Ct2iI7BQAAg6dq4oO9DMID8YsCDwCRsbq0NcjydQAAnVtX1tbQwSA8EKco8AAQAZomVrF8HQBA/1RNfJTfKDsFgO5R4AEgAvIbO+q5aRAAYAifl7a2Ov2yUwDoBgUeACJgJcvXAQCMIqhqH+5rkJ0CQDco8ABwvOy+4JeVLF8HADCOlaWtJheD8EDcocADwPFae6hNCamyUwAAEDFKUF3AIDwQfyjwAHBcNE0sL26WnQIAgAhbUcIgPBB3KPAAcFz2NXTUsXwdAMBwlJD6IcvRA3GGAg8Ax2VZEcPvAABj+qy4hUF4IK5Q4AFg8Mwu//Zqlq8DABiTElLZEx6IKxR4ABi85cUtQVWTnQIAgGhZziA8EE8o8AAwSEFVW1XaJjsFAABRxCA8EFco8AAwSFsrzGYGJQAARscgPBA/KPAAMEifFrfIjgAAQNQpIfXj/U2yUwAQggIPAINTbfEcaLTLTgEAQCwsL242uRTZKQBQ4AFgUNg9DgCQOPxB9d09dbJTAKDAA8DAeZTQujKWrwMAJJBVpa11Nq/sFECio8ADwICtLWtzKyHZKQAAiJ2Qqr2bxyA8IBkFHgAGbDnL1wEAEs/Gw+Zys0t2CiChUeABYGAONNor292yUwAAEGuqps3bxSA8IBMFHgAG5uP9jbIjAAAgx7ZqS3GzQ3YKIHFR4AFgAJodvh01FtkpAACQ5q2dNbIjAImLAg8AA7Bwf6OqyQ4BAIA8Bxrte+s7ZKcAEhQFHgD6y62E1hxi9zgAQKJ7a0eNxq+zARko8ADQXytKWtg9DgCAQ23Or6raZacAEhEFHgD6RdXE0sJm2SkAAIgLc7bXKEFVdgog4VDgAaBftlSYmx0+2SkAAIgLjR3eT4v5vTYQaxR4AOiXRQeaZEcAACCOvJvXYPcFZacAEgsFHgD6dqjNVdLCtrcAAHzH4Qu8u6dOdgogsVDgAaBvH+9vlB0BAIC4s7Soudrilp0CSCAUeADog8nl31phlp0CAIC4E1K1t3bUyk4BJBAKPAD0YUlBU1Blu1sAALqxrdqyr6FDdgogUVDgAaA3Ln9wRUmr7BQAAMSv17+u5hfdQGxQ4AGgN58UNjv9LLELAECPys2u1aX8shuIBQo8APTIGwgtKWT3OAAA+vDPXbVuJSQ7BWB8FHgA6NGKkhabJyA7BQAA8a7drfxzV63sFIDxUeABoHtBVVt0gOF3AAD6ZWlh88FWp+wUgMFR4AGge6tLW1udftkpAADQB1XTXtlayWp2QFRR4AGgG6omPtrfKDsFAAB6UtbmXFbULDsFYGQUeADoxqZyc73NKzsFAAA6M3dnjcnF/DUgWijwANCVpokP8xtkpwAAQH/cSuj1r6tlpwAMiwIPAF1tr7YcMblkpwAAQJc2lZt31FhlpwCMiQIPAF0tYPgdAIDj8MrWSm+AbeGByKPAA8D37K23FTU7ZKcAAEDHWhy+d/PqZacADIgCDwDf889ddbIjAACgex/lNx5otMtOARgNBR4AvrO1or2kheF3AACOl6ppz2084mEiPRBRFHgA+IaqiXm7GX4HACAymh2+v29jRXogkijwAPCNtYdaqy1u2SkAADCOz4pbtldbZKcAjIMCDwBCCBEIqSy3AwBAxP3PpgqrR5GdAjAICjwACCHE8uKWJrtPdgoAAIzG6lFe2lwhOwVgEBR4ABDeQOj9vQy/AwAQFV9VWdaVtclOARgBBR4AxOKCJqsnIDsFAACG9dqXVcx0A44fBR5AonP4Ah/vb5KdAgAAI3P6g0+sLvUFVdlBAH2jwANIdPP3NTp8DL8DABBdFWb3X7ZwMzxwXCjwABJaY4d3SQHD7wAAxMKaQ23Li1tkpwB0jAIPIKH99asqJcR0PgAAYuS1LyuLmh2yUwB6RYEHkLi+rrLsqLHKTgEAQAIJqtof1h5iZ3hgcCjwABKUP6j+v6+rZKcAACDhmFzKH9cdDqma7CCA/lDgASSoBfsa2M8GAAAp8hs63theIzsFoD8UeACJqNXp/zC/QXYKAAAS18IDjYsOsI4sMDAUeACJ6NWtlWxFCwCAXK9vq1pXZpKdAtATCjyAhLOnzrat2iI7BQAAiU7TxJ+/OJJXb5MdBNANCjyAxBIIqa9urZSdAgAACCFEUNV+v/rQEZNLdhBAHyjwABLL/+6pr+/wyk4BAAC+4VZCj60qbXGwsizQNwo8gARS3OJYsK9edgoAAPA9Zpf/NysOsjk80CcKPIBE4Q2Entt4hE1nAQCIQ7VWz6+WFplcftlBgLhGgQeQKF7/urrexuR5AADiVJ3N++AnRc3MpQd6RoEHkBDy6m0rDrbITgEAAHrT7PD936VFjaxWA/SAAg/A+Jz+4PMbj2hMngcAIO61Ov0PLi2qtrhlBwHiEQUegPG9tLnC5GJdHAAA9KHdrTy8vIQODxyLAg/A4Na
XmTaVm2WnAAAAA9DuVh5cWryvoUN2ECC+UOABGFmt1fPKl5WyUwAAgAFz+AK/+axk5cFW2UGAOEKBB2BYDl/gt5+XuvxB2UEAAMBgBFXthU3lf/2qil1ggTAKPABjUjXtmQ1H2DcOAAC9W1LQ9NjKg24lJDsIIB8FHoAxvbG9ZmeNVXYKAAAQAbtqrb9ayhbxAAUegBGtKzN9vL9RdgoAABAx5WbXnR/t33iEhWmR0CjwAIymrM354uZy2SkAAECEuZXQ0+vKnt1w2BdUZWcB5KDAAzAUk8v/288P+fnvOgAABrWuzHTf4oIaq0d2EEACCjwA42h3Kw99Wmx2+WUHAQAAUVTZ7v4/iwrYYQ4JiAIPwCCsHuXh5SV1LDsPAEAC8AZCL2wq/82KkhZWtkMiocADMAKrR5n5aXG1xS07CAAAiJ3dtbZbF+Qv2NegauwUj4RAgQege9+2d+6FAwAg4fiC6ps7ah78pKiWu+KRACjwAPTN6gnM/LSE9g4AQCIrbnHcvfDAe3n1SoiFbGFkFHgAOtZk9zFzHgAACCH8QfWfu2pvXZD/dZVFdhYgWijwAPTqQKP9/iWFtHcAANCpye773eelDy9ndh6MiQIPQJcWHmic+WmR1aPIDgIAAOLO3nrb3QsPzNle7fQHZWcBIokCD0BnlJD6/Bflr39drbLcLAAA6EEgpH6U33jT+/sW7GvgxngYBgUegJ60OHwPfFK0urRVdhAAAKADDl/gzR01P5+fv67MxFZzMAAKPAB9UDWxuKDp9g/3l7U5ZWcBAAB60uLwPbvh8C8WFmyvZn076Fuq7AAA0LcKs/vFzeUHW6nuAABgkMrNrsdXlU7MyfrlpeOvmpgjOw4wGBR4AHHNH1QX5Dd8sLchwN1rAADguJVuv7OuAAAbYklEQVSbXU+tLbvgQNP9l4z/4Zkny44DDAwFHkCcCqna+sOmd/bUNdl9srMAAABDKWlx/Pbz0gvHZf/y0vGXn36S7DhAf1HgAcSdoKptPGJ6L6++3uaVnQUAABhWYZP90RX2iTlZd1887tpzRycnyQ4E9CVJYzHGgUtKShJC8I8u/tntdrvdnp2dnZ2dLTsL+kUJqSsPti7Y19Dm9MvOAiCmgsGgqqqpqanJyaywC0CCM04edt8l439yTk4KPb4vDQ0NmqaNHz8+XIvQKQY9kQI/GBR4vaDA60hZm3NtmWnjEZPNE5CdBYAEFHgA8WDksPSfTT7lzqnjThjCVOUeUeB7EoOeyM8lAJlMLv+WivbVpW3lZpfsLAAAING1u5X/3V23cH/jjPPH/J9/GT8qK112IuB7KPAAYk3VxMFWx64a665a62GTW2UyCwAAiCduJbSkoGl5ccs15466c+q4iTnDZCcCvkGBBxALqqbVWr3FLY68OltefYfDxzx5AAAQ1wIhdXVp6+rS1otOzb5j6rj/OHMkd8dDOgo8gGgxu/xHzO6DLY6SFufBVodbCclOBAAAMGAHGu0HGu2nDM+4dcrYmyeNGZ5Bh4I0LGI3GCxipxcsYhdLNk+gvsNb1e6ubHdXWdwV7W6HLyg7FADdYBE7ALqQnpJ81cSRP588dsrY4bKzSMMidj1hETsA8ajdrTTZfc12X5PDV2fz1Nu8DR0+ZsUDAADDU0LqujLTujLTWSOH/eyCU677wags1qtHDDECPxiMwOsFI/DHI6hqZpe/1elvcfhanf5Wh7/V6Wtx+JvsPiWkyk4HwGgYgQegRxmpyVeePfLG88dcdGp2csIMRzMC3xNG4AFEly+omlz+dpfS5vS3u5U2l9/k8puc/jaXYnErrA8PAADQC1/wmwH5sdkZN5w3+vofjB6XnSE7FIyMEfjBYAReLxiBF0K4lZDZ5bd6Au1uxeJRzC6l3e1vdwfa3YrZ5Xf6uU0dQFxgBB6AASQliQvHZl89MeeqiSNPGmrYPeQZge+JoUbgm5qaysrKPB5Pbm7ulClTBvcv2+FwFBUVWSyWUaNGTZ06NTMzMwZvCsQztxJqdys2j2LzBswupcMXsLiVdrdi+7ax+4NMdwcAAIgFTRMFTfaCJvtfv6q8ZPyJ/3lOzvSzTh6ekSY7F4wjFgW+trZ25syZX3zxReevInJzc19++eW77rqr/yfxeDxPPvnkBx984PV6w88MHz78kUceee6559LSuvm/RETeFJDO5gnYvAGrR2l3K1aPYvMGLW6l8xmbJ8Dt6AAAAPFG1URevS2v3vaXrckXjcv+0Zkn//DMk8ecMER2Luhe1KfQV1VVXXbZZRaLRQiRmpp6wgkn2Gy28EuvvfbaE0880Z+T+P3+H/3oR3l5eeE/5uTkmM3m8OMZM2asXLmyy3S7iLxpL5hCrxfxP4VeCant7vC09m8qutmlWD2K2a1YPQGbRwmq/JgBSAhMoQdgeOeOyvrhGSdfccZJ54zK0vWKd0yh70kMemLUC/xll122d+/e5OTkuXPn3nnnnVlZWfn5+b/4xS8qKiqSk5P3799/4YUX9nmSWbNmvfrqq0KImTNnzp49e9y4cZWVlb///e9XrFghhJgzZ86jjz4a8TftBQVeL+KkwFs9AbPL3+b0m92K2eVvcylml9/k8re7FXZKB4AwCjyAxDEiM+3S3BMvyx1x2Wkn6nFYngLfE90X+A0bNlx77bVCiL/85S+zZs3qfL6iouKSSy6x2+233HLLsmXLej+JxWLJzc31eDzXXXfd6tWrO/+77vV6r7jiiv37948dO7a2trZzIn1E3rR3FHi9iGWBd/gCbU6l1elrdfpNLqXN6W91+kwuxeTyK9yFDgB9ocADSEynnZh50akjpowdPmXs8PEj+ljhK05Q4Hui+wJ///33z58/f9SoUY2NjV3uVH/wwQffe++9jIwMq9Xa+1p0CxYsuO+++4QQRUVFkydPPvqljz/++J577hFCbN26dfr06RF8095R4PUi4gVeCamtTn+b09/m9Lc4fN8WdX+r0+dRQhF5CwBITBR4ADh5aPqUccMvGDP83NFZ547KOmFInO75TYHvie5Xod+0aZMQ4ic/+cmx68zNmDHjvffe8/l8O3bsuPrqq/s8yfjx47u0dyHEddddl5KSEgqFNm/e3FngI/KmSGRKSG1z+s0uJTyEbnb5Wxx+k8tvcilWjyI7HQAAAIzJ4lG2VrRvrWgP/3Fcdsa5o7LOHZV15shhE04aOi47M5m+nPCiWOCdTmdTU5MQYurUqce+2tm3Dx8+3HuXPnToUE8nOemkkyZPnlxQUHD48OHIvimMLbw1us0baHcrFrdi9QTCd6RzXzoAAADiRJPd12T3bfm2z6enJueOyDz9pKGnnZg5Ljtj7PCMscMzRp8wJIVan0iiWOCrqqrCD0477bRjXx0+fPiIESM6Ojo6D+tJdXV1TycJP19QUNB5kki9KfQoEFJd/pBLCTp8QYcv6PAHWyx2k90VSHL6REuHN2BxKx3eQIc3yNZrAAAA0BclqFa2uyvb3Uc/mZKcNDpryKgThozKGjJyWPrIYWkjh6WPHJY+IjMt/L/0FO5LMpQoFniHwxF+MGLEiG4PCH
dpu93e+3mcTmfvJxFCdJ4kUm86ZcqU3g8QQrS0tPR5DLoVCKm+b9d1C2rCH9SEEEFV8wZCQoiAKvxBVdWEJxASQrgDqqppLn9IFcITUJWgqqia0x/yB1VfUPUGNU8g5FZCbkUNHLPjWigUUlU1OTk5JSUlth8RADAwwWAwfNMgd1QCQP8FhKjzK3UWZ08HZKQmDx+SkpWePCw9ZWhayrD05KFpKUPTkoemJackiWFpKUNSk9NTkoampSQniYy05NQkkZqcnJmWLIRITRZDUpKEECnJyUPTvvtFgMlk0jQtNTWVK3bsRbHAezye8IOMjIxuDwgvI+d2u7t9Nczn86mq2v+TRORNhRDFxcW9HyCECAQCfR6jX24lNIi1F0Kq8Ha34ro/pAX6PeidJERGshBCDE1JEkKMzAh378H8rLrdbrfbPWzYsGHDhg3irwMAYqajo0NRlBEjRqSnp8vOAgAJSBVCiJAaFCIohK/XdZ9sNl96SlLI5jm6wA9JTUob+GT+jNTkVG4BGIgoFvjU1G9OHgp1vzp3uAD3/mubgZ4kIm8qhCgsLOzl1fA28mPGjOn9JJDObrc7HI7hw4fL3QceANAns9ns8/lycnJ6+hU8ACBONDamapp26qmnMgIfe1Es8J1jnj6fr9sDws9nZWX1cpLU1NT09HRFUfp5koi8qejfFHqGCOJfenp6Wlpaeno6/7IAIM6lpaWFQiGu2AAQ/9LS0jRNS09Pp8DHXhSXNMjJyQk/aGtrO/ZVTdNMJpMQYuTIkf05T7cnEUK0trYefZJIvSkAAAAAAHEligV+woQJ4V+ih5eR76KpqUlRFCHEueee2/t5zjnnnJ5OIoSoqak5+iSRelMAAAAAAOJKFAt8SkpKeCL6nj17jn01Ly8v/OCiiy7q/TwXX3yxECI/P//Y29rb29vDG8J1niRSbwoAAAAAQFyJ7q6A119/vRBiy5YtFouly0tLly4VQuTm5vZ5t3n4JE6nc/369V1eWrZsWfjBjBkzIvumAAAAAADElegW+F/+8pfhJehmz5599PN5eXnLly8XQjz88MNHP79jx4558+bNmzcvPK4edsUVV5x//vlCiGeeecbv93c+39HR8dJLLwkhrrnmmjPOOGPQbwoAAAAAQPyLboEfP378E088IYSYN2/ePffcs3bt2r1797700kvXXHNNKBSaOHHib37zm6OPX7JkyUMPPfTQQw/t27fvu4jJyX/729+Sk5MLCwuvvPLKjz76qLi4eN68ef/+7//e2NiYmZn5yiuvHM+bAgAAAAAQ/5I0TYvqG6iqet9993300Uddnj/zzDPXr19/9tlnH/3kI4888o9//EMIsXjx4jvuuOPol+bOnfvYY4+Fd3HvlJWVtXDhwhtvvPF43nQQwvslRPsfHY6f3W632+3Z2dnsAw8Acc5kMvl8vlGjRrEPPADEuYaGBk3Txo8fzzZyXcSgJ0a9wIetXr36gw8+KC0t9Xq9ubm5N91000MPPXTsZuzr1q3bu3evEOKWW26ZNGlSl1eLiormzp27a9cuq9U6evTo6dOnP/LIIxMmTDjONx0ECrxeUOABQC8o8ACgFxT4nhinwBsMBV4vKPAAoBcUeADQCwp8T2LQE6N7DzwAAAAAAIgICjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOpMoOoGNJSUmyIwAAAAAAEgUj8AAAAAAA6ECSpmmyMwDR8txzzz3//PN/+tOfnnvuOdlZAAC9ueqqq7Zs2bJp06arrrpKdhYAQG8yMjL8fr/X683IyJCdJeEwAg8AAAAAgA5Q4AEAAAAA0AEKPAAAAAAAOkCBBwAAAABAByjwAAAAAADoAAUeAAAAAAAdoMADAAAAAKADFHgAAAAAAHSAAg8AAAAAgA4kaZomOwMAAAAAAOgDI/AAAAAAAOgABR4AAAAAAB2gwAMAAAAAoAMUeAAAAAAAdIACDwAAAACADlDgAQAAAADQAQo8AAAAAAA6QIEHAAAAAEAHKPAAAAAAAOgABR4AAAAAAB1IlR0AiLWmpqaysjKPx5ObmztlypSkpCTZiQDAsJYtWzZmzJgrrrii98MGdGUOBoMFBQXNzc3Dhw+fNGlSTk5ORCMDQCJqb2/fvHnz1KlTzznnnAie1uFwFBUVWSyWUaNGTZ06NTMzM4InT0yMwEOvVFUdO3bsyJ6tX7++y1+pra295pprxo8ff/XVV990001Tp049/fTTFy1aJCU/ABjenj17brvttldeeaWXYwZ6ZX7rrbfGjRt36aWX3nzzzVdeeeXYsWNvu+02s9kchfgAkEDefffdO++8c/Xq1T0d8LOf/ayXL96zZ8/ucrzH4/mv//qvMWPG/PCHP/zpT386bdq0MWPGPP3004FAIMofxeAYgYde1dfXt7S09HKA3+8/+o9VVVWXXXaZxWIRQqSmpp5wwgk2m62+vv7uu+9ubm5+4oknohsXABLPvHnzej9goFfmxx57bM6cOeHHI0eOtFqtwWBw2bJl+/bt27t3L0PxADA4fr9//vz5vR9z4MCB8OW6Wy6Xq8sJr7zyyry8vPAfc3JyzGazw+F46aWXSkpKVq5cmZzMQPIgUeChVxUVFeEHTz31VEpKyrEHdJn/c9ddd1ksluTk5Llz5955551ZWVn5+fm/+MUvKioqZs2addVVV1144YWxyA0ACcDv97/55psLFizo/bABXZnXrVsXbu+XX375vHnzLrjggvb29rlz5/7pT3+qra399a9//dlnn0X3UwGAEbW3tz/66KNHjhzp5Ri/39/Q0CCEuOuuuyZMmHDsAZdffvnRf3z22WfD7X3mzJmzZ88eN25cZWXl73//+xUrVqxevfqtt9569NFHI/ohEokG6NPcuXOFEKNHj+7PwZ3T6f/yl78c/Xx5eXl2drYQ4pZbbolOTABIIDab7YUXXrjzzjtHjRrV+U3j+uuv7/bggV6ZL7vsMiFEbm6uyWQ6+vmHH35YCJGUlFRSUhLxTwQARnXw4MGnn376hhtuyMjI6Lxiv/baa90efOjQofABZWVlfZ65vb196NChQojrrrsuFAp1Pu/xeC6++GIhxNixYxVFidgnSTBMXYBehUfgJ06c2J+DP/nkEyHEqFGjfvvb3x79/Nlnn33LLbcIIdasWeP1eqMQEwASSGtr6+zZsxcvXmwymfo8eEBX5rq6uvBgzu9+97suU+X/8Ic/CCE0TVu2bFmEPgcAGN/27dtffPHFNWvW+Hy+Pg8Of/FOSUk588wz+zx4zZo1Ho9HCPHyyy8fPVU+MzPzscceE0I0Nzfv2LFj8NETGwUeelVZWSmOm
Sffk02bNgkhfvKTn6SlpXV5acaMGUIIn8/HdQQAjtMpp5zywVF6v0QP6MocPlgIccMNN3Q5+NRTTw3PtN+8eXMkPgQAJITp06cffcXu/eDwF+8JEyYce8U+VviKPX78+MmTJ3d56brrrgvf+soVe9C4Bx56Ff5F4Lnnnmuz2TZs2FBeXp6ZmTl58uSLLrro6KmbQgin09nU1CSEmDp16rHnmT59evjB4cOHr7766ugHBwDDys7Ovu+++zr/OH/+/J5uqhzolTk8ezM7O/uMM87o9vjCwsLDhw8f7wcAgIRxzjnnHP1r1vvvv7+Xgzu/eAeDwQ0bNoQ3/pw0adKUKVPOOuusLgeHr9jdXt5POumkyZMnFxQUcMUeNAo8dElV1erqaiHEl19++cILL3R0dHS+NGTIkGeeeWbWrFmpqd/8eFdVVYUfnHbaaceeavjw4SNGjOjo6Og8DAAQbQO9Moev+bm5ud2eLXwSq9Xa0dExYsSIqCQGgAQWHoE3mUznnXde50rSYXffffecOXNOPvnkzmfCV+xuL+/h5wsKCvjiPWhMoYcu1dfXK4oihFi7dq3T6bzgggtuv/32Sy+9NCMjw+/3z549+9prr9U0LXyww+EIP+jpW134ebvdHpPsAIABX5nDx/d+sOBKDgDRES7te/furaioOO20037+859Pnz79xBNPFEIsXLhw0qRJZrO582Cn0yn44h01FHjoUudv/qZNm1ZbW1tcXLxkyZK8vLzy8vIf//jHQojNmze/88474WPCq2gIIY5eY/NomZmZQgi32x313AAAIcTAr8zh43s/WHAlB4Ao6NxD7pRTTtm5c2dtbe2nn366devWxsbG//7v/xZCtLa2hh8IIXw+n6qqgi/eUUOBhy6dcsopL7/88quvvrphw4ZTTz218/nx48evXLly3LhxQojZs2eHn+ycSx8Khbo9WyAQEEIkJSVFNzQA4FsDvTKHj+/9YMGVHACiwO/3v/jiiy+//PLWrVuP3u996NChf//732+88UYhxOLFi8O3vvPFO9q4Bx66NGnSpEmTJnX7UlZW1mOPPfbkk0+azeampqZx48YNGzYs/FJPm2SEn8/KyopSWgBAFwO9MoeP7/1gwZUcAKJg+PDhTz31VE+vPvPMM59//rkQoqCg4LzzzktNTU1PT1cUhS/eUcIIPAzoggsuCD8oKSkRQnTuGNzW1nbswZqmhfcrHjlyZKwCAkCiG+iVOXx8twcLIVpbW4UQSUlJR6+iBACIgfPPPz+82Xv4i7fo3xWbL96DRoGHAXV+gRsyZIgQYsKECenp6eLbJTG7aGpqCq+Hd+6558YwIwAktIFemcN7HTU0NHTOlj9aTU2NEGL8+PFDhw6NXmYAwLEyMzPD197wF2/x7RW728u7+PaKzRfvQaPAQ5d+97vfzZw589NPP+321fLy8vCD8DT7lJSUKVOmCCH27Nlz7MF5eXnhBxdddFFUsgIAjjHQK/PFF18shFAUpaCgoKfjuYwDQDS8/fbbM2fOfOmll7p9tbm52eVyiW+/eItvr9j5+fnH3gbf3t4e3kCOK/agUeChS83NzW+//fasWbPCq1x2sWrVKiHEKaec0jlF8/rrrxdCbNmyxWKxdDl46dKlQojc3NzwV0kAQGwM6Mo8ffr08MLF4ZeOVlZWdvDgQSFEeCElAEDEvf3227Nnzw4PnncR/uIthOi8Yocv706nc/369V0OXrZsWfjBjBkzopXV6Cjw0KU777xTCFFdXf2HP/yhy0uLFi0Kf7179tlnO5/85S9/GV5Oo3Np+rC8vLzly5cLIR5++OGohwYAHGVAV+bMzMz77rtPCDFv3ryjp2VqmjZr1iwhRE5Ozq233hqb5ACQUG655Za0tDRN0+69997w/U2dSktLn376aSHE7bffPnHixPCTV1xxxfnnny+EeOaZZ/x+f+fBHR0d4WH8a6655owzzojdBzCWlOeee052BmDAJk6cWFhYeOTIkZ07d27bti0YDLa1tW3evPnFF18MXxemTZv2j3/8o3ODiuzsbJ/Pt3379vz8/KqqqiFDhnR0dCxYsODhhx/2er0TJ058//3309LSpH4mADCa+fPn19XVTZw48a677jr21YFemadOnfrRRx91dHSsWLEiKSkpMzNz27Zts2bNWrt2rRBizpw5//qv/xq7zwYAxvL8888LIf7zP//z6I3iwoYOHTps2LAvvviivr5+yZIlqqpaLJbdu3e/++67v/71r91u94knnrhmzZrOheWTkpLOPPPMRYsWtbS0bN26NT09PSkpacWKFTNnzqyqqsrMzPzkk09Gjx4d609oGBqgTw6H44orruj2p/qee+6xWq1djg+FQvfcc8+xB5955pnl5eVSPgIAGNuPfvQjIcT111/f0wEDvTLv3Lmz896oTklJSbNnz47m5wAA4wtfUV977bWeDnj00UfDq813cemll5aWlh57/D/+8Y9jh8eysrJWrVoVzc9hfEnat/+2AD1at27dhx9+WF1d3draesYZZ0yePHnGjBlXX311T8evXr36gw8+KC0t9Xq9ubm5N91000MPPcRGlAAQDfPnz6+tre1pBL7TgK7MbW1tb7311saNG5ubm4cPH37xxRfPnDlz2rRp0fkEAJAowvOyux2B71RaWvrGG2+UlZXV1dXl5ORMmTJl2rRp9957b0pKSrfHFxUVzZ07d9euXVardfTo0dOnT3/kkUcmTJgQpY+QICjwAAAAAADoAIvYAQAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB0gAIPAAAAAIAOUOABAAAAANABCjwAAAAAADpAgQcAAAAAQAco8AAAAAAA6AAFHgAAAAAAHaDAAwAAAACgAxR4AAAAAAB04P8Dstb/ab5gFfUAAAAASUVORK5CYII=",
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fig = Figure()\n",
+ "ax = Axis(fig[1, 1], title=\"Distribution\")\n",
+ "density!(ax, iq)\n",
+ "fig"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "e37369d6",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\u001b[36m\u001b[1m┌ \u001b[22m\u001b[39m\u001b[36m\u001b[1mInfo: \u001b[22m\u001b[39mFound initial step size\n",
+ "\u001b[36m\u001b[1m└ \u001b[22m\u001b[39m ϵ = 0.025\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\r",
+ "\u001b[32mSampling: 0%|█ | ETA: 0:00:36\u001b[39m"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\r",
+ "\u001b[32mSampling: 100%|█████████████████████████████████████████| Time: 0:00:01\u001b[39m\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "Chains MCMC chain (400×14×1 Array{Float64, 3}):\n",
+ "\n",
+ "Iterations = 201:1:600\n",
+ "Number of chains = 1\n",
+ "Samples per chain = 400\n",
+ "Wall duration = 8.8 seconds\n",
+ "Compute duration = 8.8 seconds\n",
+ "parameters = μ, σ\n",
+ "internals = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size\n",
+ "\n",
+ "Summary Statistics\n",
+ " \u001b[1m parameters \u001b[0m \u001b[1m mean \u001b[0m \u001b[1m std \u001b[0m \u001b[1m mcse \u001b[0m \u001b[1m ess_bulk \u001b[0m \u001b[1m ess_tail \u001b[0m \u001b[1m rhat \u001b[0m \u001b[1m e\u001b[0m ⋯\n",
+ " \u001b[90m Symbol \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m \u001b[0m ⋯\n",
+ "\n",
+ " μ 99.2403 0.6727 0.0333 414.3604 324.9996 0.9993 ⋯\n",
+ " σ 14.4973 0.4440 0.0187 561.5709 284.5407 0.9976 ⋯\n",
+ "\u001b[36m 1 column omitted\u001b[0m\n",
+ "\n",
+ "Quantiles\n",
+ " \u001b[1m parameters \u001b[0m \u001b[1m 2.5% \u001b[0m \u001b[1m 25.0% \u001b[0m \u001b[1m 50.0% \u001b[0m \u001b[1m 75.0% \u001b[0m \u001b[1m 97.5% \u001b[0m\n",
+ " \u001b[90m Symbol \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m\n",
+ "\n",
+ " μ 97.9096 98.7663 99.2552 99.7769 100.4228\n",
+ " σ 13.6853 14.1811 14.5066 14.7917 15.3761\n"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#| output: false\n",
+ "#| code-fold: false\n",
+ "\n",
+ "@model function model_gaussian(x)\n",
+ " # Priors\n",
+ " μ ~ Uniform(0, 200)\n",
+ " σ ~ Uniform(0, 30)\n",
+ "\n",
+ " # Check against each datapoint\n",
+ " for i in 1:length(x)\n",
+ " x[i] ~ Normal(μ, σ)\n",
+ " end\n",
+ "end\n",
+ "\n",
+ "fit_gaussian = model_gaussian(iq)\n",
+ "chain_gaussian = sample(fit_gaussian, NUTS(), 400)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "31ba55af",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Chains MCMC chain (400×14×1 Array{Float64, 3}):\n",
+ "\n",
+ "Iterations = 201:1:600\n",
+ "Number of chains = 1\n",
+ "Samples per chain = 400\n",
+ "Wall duration = 8.8 seconds\n",
+ "Compute duration = 8.8 seconds\n",
+ "parameters = μ, σ\n",
+ "internals = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size\n",
+ "\n",
+ "Summary Statistics\n",
+ " \u001b[1m parameters \u001b[0m \u001b[1m mean \u001b[0m \u001b[1m std \u001b[0m \u001b[1m mcse \u001b[0m \u001b[1m ess_bulk \u001b[0m \u001b[1m ess_tail \u001b[0m \u001b[1m rhat \u001b[0m \u001b[1m e\u001b[0m ⋯\n",
+ " \u001b[90m Symbol \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m \u001b[0m ⋯\n",
+ "\n",
+ " μ 99.2403 0.6727 0.0333 414.3604 324.9996 0.9993 ⋯\n",
+ " σ 14.4973 0.4440 0.0187 561.5709 284.5407 0.9976 ⋯\n",
+ "\u001b[36m 1 column omitted\u001b[0m\n",
+ "\n",
+ "Quantiles\n",
+ " \u001b[1m parameters \u001b[0m \u001b[1m 2.5% \u001b[0m \u001b[1m 25.0% \u001b[0m \u001b[1m 50.0% \u001b[0m \u001b[1m 75.0% \u001b[0m \u001b[1m 97.5% \u001b[0m\n",
+ " \u001b[90m Symbol \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m\n",
+ "\n",
+ " μ 97.9096 98.7663 99.2552 99.7769 100.4228\n",
+ " σ 13.6853 14.1811 14.5066 14.7917 15.3761\n"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#| code-fold: false\n",
+ "\n",
+ "chain_gaussian"
+ ]
+ },
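+  {
+   "cell_type": "markdown",
+   "id": "posterior-draws-note",
+   "metadata": {},
+   "source": [
+    "Raw posterior draws can also be pulled out of the chain to compute custom summaries. Below is a minimal, unexecuted sketch using the fitted `chain_gaussian`.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "posterior-draws-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: extract the posterior draws for μ and summarize them manually\n",
+    "posterior_μ = vec(chain_gaussian[:μ])\n",
+    "(mean(posterior_μ), quantile(posterior_μ, [0.025, 0.975]))"
+   ]
+  },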
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "45fb1631",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "HPD\n",
+ " \u001b[1m parameters \u001b[0m \u001b[1m lower \u001b[0m \u001b[1m upper \u001b[0m\n",
+ " \u001b[90m Symbol \u001b[0m \u001b[90m Float64 \u001b[0m \u001b[90m Float64 \u001b[0m\n",
+ "\n",
+ " μ 97.8594 100.3178\n",
+ " σ 13.5687 15.2885\n"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#| code-fold: false\n",
+ "\n",
+ "# Summary (95% CI)\n",
+ "hpd(chain_gaussian)"
+ ]
+  },
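+  {
+   "cell_type": "markdown",
+   "id": "posterior-predictive-note",
+   "metadata": {},
+   "source": [
+    "As a forward pointer, a hypothetical sketch following the posterior-predictive pattern used in later chapters: passing `missing` observations to the model makes Turing sample new datapoints from the posterior.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "posterior-predictive-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: posterior predictive draws via `missing` observations\n",
+    "pred = predict(model_gaussian([missing for _ in 1:length(iq)]), chain_gaussian)\n",
+    "pred = Array(pred)  # draws × datapoints"
+   ]
+  }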
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Julia 1.10.2",
+ "language": "julia",
+ "name": "julia-1.10"
+ },
+ "language_info": {
+ "file_extension": ".jl",
+ "mimetype": "application/julia",
+ "name": "julia",
+ "version": "1.10.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
\ No newline at end of file
diff --git a/content/.jupyter_cache/global.db b/content/.jupyter_cache/global.db
index f6c8110..60b74c8 100644
Binary files a/content/.jupyter_cache/global.db and b/content/.jupyter_cache/global.db differ
diff --git a/content/.quarto/_freeze/1_introduction/execute-results/html.json b/content/.quarto/_freeze/1_introduction/execute-results/html.json
index 82e5a35..828cd85 100644
--- a/content/.quarto/_freeze/1_introduction/execute-results/html.json
+++ b/content/.quarto/_freeze/1_introduction/execute-results/html.json
@@ -1,8 +1,8 @@
{
- "hash": "f1b09c48ebcc0d8b9c817ca76b3d37bf",
+ "hash": "13a6386ee9710a33c192a1e02a587307",
"result": {
"engine": "jupyter",
- "markdown": "# Fundamentals of Bayesian Modeling in Julia\n\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Very quick intro to Julia and Turing\n\nGoal is to teach just enough so that the reader understands the code.\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions makes a clear distinction between positional and keyword arguments, and both are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These **symbols** are like character strings that are not manipulable (there are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to have it apply element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of its vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a band `!` is used. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (it is equivalent to `x = push(x, 2)`).\n:::\n\n\n### Generate Data from Normal Distribution\n\n::: {#0c15ea13 .cell execution_count=1}\n``` {.julia .cell-code}\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n:::\n\n\n::: {#6de958d5 .cell execution_count=2}\n``` {.julia .cell-code}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=3}\n![](1_introduction_files/figure-html/cell-3-output-2.svg){}\n:::\n:::\n\n\n### Recover Distribution Parameters with Turing\n\n::: {#76fbbced .cell execution_count=3}\n``` {.julia .cell-code}\n@model function model_gaussian(x)\n # Priors\n μ ~ Uniform(0, 200)\n σ ~ Uniform(0, 30)\n\n # Check against each datapoint\n for i in 1:length(x)\n x[i] ~ Normal(μ, σ)\n end\nend\n\nmodel = model_gaussian(iq)\nsampling_results = sample(model, NUTS(), 400)\n\n# Summary (95% CI)\nsummarystats(sampling_results)\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Info: Found initial step size\n└ ϵ = 0.05\n\rSampling: 0%|█ | ETA: 0:00:32\rSampling: 100%|█████████████████████████████████████████| Time: 0:00:01\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n\n::: {.ansi-escaped-output}\n```{=html}\n
Summary Statistics\n parameters mean std mcse ess_bulk ess_tail rhat ⋯\n Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯\n μ 101.0966 0.6163 0.0285 464.5397 331.0063 1.0010 ⋯\n σ 14.5905 0.4758 0.0221 504.1965 231.1654 1.0362 ⋯\n 1 column omitted \n \n```\n:::\n\n:::\n:::\n\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Boostrapping\n\nIntroduce concepts related to pseudo-posterior distribution description\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, how to leverage them to model interindividual differences\n\n## Bayesian estimation\n\nintroduce Bayesian estimation and priors over parameters\n\n## Bayesian mixed linear regression\n\nput everything together\n\n",
+   "markdown": "# Fundamentals of Bayesian Modeling in Julia\n\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Brief Intro to Julia and Turing\n\nThe goal is to teach just enough so that the reader understands the code. \nWe won't be discussing things like plotting (as it depends highly on the package used).\n\n### Installing Julia and Packages\n\nTODO.\n\n\n### Julia Basics\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions make a clear distinction between positional and keyword arguments, which are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These *symbols* are like character strings that are not meant to be manipulated (they are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to apply them element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of the vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a bang `!` is appended to their name. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (as if one had written `x = vcat(x, 2)`).\n- **Macros**: Some functions start with `@`. These are called macros and are used to manipulate the code before it is run. For example, `@time` will measure the time it takes to run the code that follows.\n- **Unicode**: Julia is a modern language that supports Unicode characters, which are often used for mathematical notation. You can get the *mu* `μ` character by typing `\\mu` and pressing `TAB`.\n:::\n\n\n### Generate Data from Normal Distribution\n\n::: {#1f15e3d4 .cell execution_count=1}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n:::\n\n\n::: {#e3dbae1f .cell execution_count=2}\n``` {.julia .cell-code}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=3}\n![](1_introduction_files/figure-html/cell-3-output-2.svg){}\n:::\n:::\n\n\n### Recover Distribution Parameters with Turing\n\n::: {#e37369d6 .cell execution_count=3}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_gaussian(x)\n    # Priors\n    μ ~ Uniform(0, 200)\n    σ ~ Uniform(0, 30)\n\n    # Check against each datapoint\n    for i in 1:length(x)\n        x[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_gaussian = model_gaussian(iq)\nchain_gaussian = sample(fit_gaussian, NUTS(), 400)\n```\n:::\n\n\nInspecting the chain variable will show various posterior statistics (including the mean, standard deviation, and diagnostic indices).\n\n::: {#31ba55af .cell execution_count=4}\n``` {.julia .cell-code code-fold=\"false\"}\nchain_gaussian\n```\n\n::: {.cell-output .cell-output-display execution_count=5}\n\n::: {.ansi-escaped-output}\n```{=html}\nChains MCMC chain (400×14×1 Array{Float64, 3}):\nIterations = 201:1:600\nNumber of chains = 1\nSamples per chain = 400\nWall duration = 8.8 seconds\nCompute duration = 8.8 seconds\nparameters = μ, σ\ninternals = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size\nSummary Statistics\n parameters mean std mcse ess_bulk ess_tail rhat e ⋯\n Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯\n μ 99.2403 0.6727 0.0333 414.3604 324.9996 0.9993 ⋯\n σ 14.4973 0.4440 0.0187 561.5709 284.5407 0.9976 ⋯\n 1 column omitted \nQuantiles\n parameters 2.5% 25.0% 50.0% 75.0% 97.5% \n Symbol Float64 Float64 Float64 Float64 Float64 \n μ 97.9096 98.7663 99.2552 99.7769 100.4228\n σ 13.6853 14.1811 14.5066 14.7917 15.3761\n \n```\n:::\n\n:::\n:::\n\n\nFor the purpose of this book, we will mostly focus on the 95% Credible Interval (CI), and we will assume that a parameter is ***\"significant\"*** if its CI does not include 0.\n\n::: {#45fb1631 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_gaussian)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ 97.8594 100.3178\n σ 13.5687 15.2885\n \n```\n:::\n\n:::\n:::\n\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Bootstrapping\n\nIntroduce concepts related to pseudo-posterior distribution description.\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, how to leverage them to model interindividual differences.\n\n## Bayesian Estimation\n\nIntroduce Bayesian estimation and priors over parameters.\n\n## Bayesian Mixed Linear Regression\n\nPut everything together.\n\n",
"supporting": [
"1_introduction_files\\figure-html"
],
diff --git a/content/.quarto/_freeze/1_introduction/figure-html/cell-3-output-2.svg b/content/.quarto/_freeze/1_introduction/figure-html/cell-3-output-2.svg
index f7b40c1..f600b86 100644
--- a/content/.quarto/_freeze/1_introduction/figure-html/cell-3-output-2.svg
+++ b/content/.quarto/_freeze/1_introduction/figure-html/cell-3-output-2.svg
diff --git a/content/.quarto/_freeze/4a_rt_descriptive/execute-results/html.json b/content/.quarto/_freeze/4a_rt_descriptive/execute-results/html.json
index 09524bf..aed7138 100644
--- a/content/.quarto/_freeze/4a_rt_descriptive/execute-results/html.json
+++ b/content/.quarto/_freeze/4a_rt_descriptive/execute-results/html.json
@@ -1,8 +1,8 @@
{
- "hash": "485397b899d19b6923f1cc82888d5854",
+ "hash": "f096b77d0a67e9d586fedbbaa7f759f7",
"result": {
"engine": "jupyter",
- "markdown": "# Descriptive Models\n\n![](https://img.shields.io/badge/status-up_to_date-green)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n::: {#def3e6bc .cell execution_count=2}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n1 1 Speed 0.7 false Low 2 1 Speed 0.392 true Very Low 3 1 Speed 0.46 false Very Low 4 1 Speed 0.455 false Very Low 5 1 Speed 0.505 true Low 6 1 Speed 0.773 false High 7 1 Speed 0.39 false High 8 1 Speed 0.587 true Low 9 1 Speed 0.603 false Low 10 1 Speed 0.435 false High
\n```\n:::\n:::\n\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are going to first take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n::: {#3b99ba16 .cell execution_count=3}\n``` {.julia .cell-code}\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n:::\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n::: {#60c565e5 .cell execution_count=4}\n``` {.julia .cell-code}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n fig = Figure()\n ax = Axis(fig[1, 1], title=title,\n xlabel=\"RT (s)\",\n ylabel=\"Distribution\",\n yticksvisible=false,\n xticksvisible=false,\n yticklabelsvisible=false)\n Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n Makie.axislegend(\"Condition\"; position=:rt)\n Makie.ylims!(ax, (0, nothing))\n return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n \n```\n:::\n:::\n\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which require the inclusion of random effects in the model).\nWe will treat the data as if it was a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$ and a variance $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n- The variance $\\sigma$ (correspondong to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition which is in our case `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n::: {#fdfd5559 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Gaussian(rt; condition=nothing)\n\n # Set priors on variance, intercept and effect of condition\n σ ~ truncated(Normal(0, 0.5); lower=0)\n\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#1e1a3766 .cell execution_count=6}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n σ 0.1652 0.1701\n μ_intercept 0.5071 0.5168\n μ_condition 0.1319 0.1457\n \n```\n:::\n\n:::\n:::\n\n\nThe effect of Condition is significant, people are on average slower (higher RT) when condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n::: {#ba2b1593 .cell execution_count=7}\n``` {.julia .cell-code}\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n:::\n\n\n::: {#f02ce7d4 .cell fig-height='7' fig-width='10' execution_count=8}\n``` {.julia .cell-code}\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=8}\n```{=html}\n \n```\n:::\n:::\n\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response time is more variable). 
\nTypical linear model estimate only one value for sigma $\\sigma$ for the whole model, hence the requirement for **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances accross different values of predictors. \nIt is important in linear models as only one value for sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the variance in the reference condition) and a prior on how much this variance changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a biggger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possible negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at for instance **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nAnd such effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will often happen as the sampling process does explore unlikely regions of the parameter space).\n\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *Truncating* the prior to a lower bound of 0. 
\nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept) - and if it wasn't the case, we could have changed the reference factor.\nHowever, this is not a good practice as we are enforcing a very strong a priori specific direction of the effect, which is not always justified.\n\n::: {#62ef3b30 .cell execution_count=9}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0) # Same prior as previously\n σ_condition ~ truncated(Normal(0, 0.1); lower=0) # Enforce positivity\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#4b488c39 .cell execution_count=10}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=10}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5081 0.5148\n μ_condition 0.1330 0.1446\n σ_intercept 0.1219 0.1271\n σ_condition 0.0714 0.0810\n \n```\n:::\n\n:::\n:::\n\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition. \n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement when sigma $\\sigma$ is negative to avoid trying this value and erroring, and instead returning an infinitely low model probability (`-Inf`) to push away the exploration of this impossible region.\n\n::: {#7b17f69f .cell execution_count=11}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! 
-Inf\n return nothing\n end\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#fa8a4426 .cell execution_count=12}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=12}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5076 0.5148\n μ_condition 0.1316 0.1444\n σ_intercept 0.1223 0.1273\n σ_condition 0.0709 0.0803\n \n```\n:::\n\n:::\n:::\n\n\n::: {#ebf2b747 .cell execution_count=13}\n``` {.julia .cell-code}\npred = predict(model_ScaledlGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=13}\n```{=html}\n \n```\n:::\n:::\n\n\n\n\nAlthough relaxing the homoscedasticity assumption is a good step forward, allowing us to make **richer conclusions** and better capturing the data.\nDespite that, the Gaussian model stil seem to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction time (RTs) have been traditionally modeled using traditional linear models and their derived statistical tests such as *t*-test and ANOVAs. Importantly, linear models - by definition - will try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times (RTs), this is not ideal, as RTs typically exhibit a non-normal distribution, skewed towards the left with a long tail towards the right. This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*. 
\nHowever, this practice should be avoided as it leads to various issues, including loss of power and distorted results interpretation [@lo2015transform; @schramm2019reaction].\nInstead, rather than applying arbitrary data transformation, it would be better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n\n## Shifted LogNormal Model\n\nOne of the obvious candidate alternative to the log-transformation would be to use a model with a Log-transformed Normal distribution.\nA LogNormal distribution is a distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ and is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$). \n\nNote that for LogNormal distributions (as it is the case for many of the models introduced in the rest of the capter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for response must be lower than the faster response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychology research has shown that such minimum response time for Humans is often betwen 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times. \n\n::: {#07b852a9 .cell execution_count=14}\n``` {.julia .cell-code}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=14}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Specification\n\n::: {#dbebf70c .cell execution_count=15}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n μ_intercept ~ Normal(0, exp(1)) # On the log-scale: exp(μ) to get value in seconds\n μ_condition ~ Normal(0, exp(0.3))\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#76460a1b .cell execution_count=16}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=16}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n τ 0.1718 0.1792\n μ_intercept -1.1590 -1.1327\n μ_condition 0.3157 0.3430\n σ_intercept 0.3082 0.3228\n σ_condition 0.0327 0.0508\n \n```\n:::\n\n:::\n:::\n\n\n::: {#6a414e1f .cell execution_count=17}\n``` {.julia .cell-code}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=17}\n```{=html}\n \n```\n:::\n:::\n\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquituous in nature (and hence used as a good default) is due to the **Central Limit Theorem**, which states that the sum of a large number of independent random variables will be approximately normally distributed. 
Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of random variables result in a **LogNormal** distribution, and multiplicating (rather than additive) cascades of processes are also very common in nature, from lengths of latent periods of infectious diseases to distribution of mineral resources in the Earth's crust, and the elemental mechanisms at stakes in physics and cell biolody [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified with the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::\n\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of normal and exponential distributions and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian distribution - and *tau* $\\tau$ - the exponential component of the distribution (note that although denoted by the same letter, it does not correspond directly to a shift of the distribution). \nIntuitively, these parameters reflect the centrality, the width and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch]. \nHowever, @matzke2009psychological demonstrate that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underline their value primarily as **descriptive tools**, rather than models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD. \nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift is centre of mass. 
\nHence, one should be careful note to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)\n\n### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameters, including the exponential component *tau* $\\tau$.\nAll wee need is to set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$. \n\n::: {#a7089bbe .cell execution_count=18}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ExGaussian(rt; condition=nothing)\n\n # Priors \n μ_intercept ~ Normal(0, 1) \n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n τ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ <= 0 # Avoid negative tau values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ExGaussian(μ, σ, τ)\n end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#bf20b174 .cell execution_count=19}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=19}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.3999 0.4062\n μ_condition 0.0618 0.0721\n σ_intercept 0.0381 0.0432\n σ_condition 0.0104 0.0185\n τ_intercept 0.1052 0.1130\n τ_condition 0.0641 0.0795\n \n```\n:::\n\n:::\n:::\n\n\n::: {#d4d95c07 .cell execution_count=20}\n``` {.julia .cell-code}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_ExGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=20}\n```{=html}\n \n```\n:::\n:::\n\n\nThe ExGaussian model also provides an excellent fit to the data. 
\nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convoluted with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variant, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), is different from the previous distributions in that it is not characterized by a \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is more complex but, as we will see below, offers a window to a new level of interpretation.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::\n\n### Model Specification\n\n::: {#4df349b0 .cell execution_count=21}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n ν_intercept ~ truncated(Normal(1, 3); lower=0)\n ν_condition ~ Normal(0, 1)\n\n α_intercept ~ truncated(Normal(0, 1); lower=0)\n α_condition ~ Normal(0, 0.5)\n\n τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n τ_condition ~ Normal(0, 0.01)\n\n for i in 1:length(rt)\n ν = ν_intercept + ν_condition * condition[i]\n if ν <= 0 # Avoid negative drift\n Turing.@addlogprob! -Inf\n return nothing\n end\n α = α_intercept + α_condition * condition[i]\n if α <= 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ < 0 # Avoid negative tau values\n Turing.@addlogprob! 
-Inf\n return nothing\n end\n rt[i] ~ Wald(ν, α, τ)\n end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n:::\n\n\n::: {#b9814b26 .cell execution_count=22}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_Wald; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=22}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n ν_intercept 5.0986 5.3197\n ν_condition -1.3387 -1.0493\n α_intercept 1.6605 1.7456\n α_condition 0.2060 0.3437\n τ_intercept 0.1808 0.1870\n τ_condition -0.0371 -0.0231\n \n```\n:::\n\n:::\n:::\n\n\n::: {#cf9d1165 .cell execution_count=23}\n``` {.julia .cell-code}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=23}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Comparison\n\nAt this stage, given the multiple options avaiable to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, which is a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n::: {#398a7d32 .cell execution_count=24}\n``` {.julia .cell-code}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledlGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n Gaussian = loo_Gaussian, \n ScaledGaussian = loo_ScaledGaussian, \n LogNormal = loo_LogNormal, \n ExGaussian = loo_ExGaussian, \n Wald = loo_Wald))\n```\n\n::: {.cell-output .cell-output-display execution_count=24}\n```\n\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\n┌────────────────┬──────────┬────────┬────────┐\n│ │ cv_elpd │ cv_avg │ weight │\n├────────────────┼──────────┼────────┼────────┤\n│ ExGaussian │ 0.00 │ 0.00 │ 1.00 │\n│ LogNormal │ -322.27 │ -0.03 │ 0.00 │\n│ Wald │ -379.85 │ -0.04 │ 0.00 │\n│ ScaledGaussian │ -2465.97 │ -0.26 │ 0.00 │\n│ Gaussian │ -2974.49 │ -0.31 │ 0.00 │\n└────────────────┴──────────┴────────┴────────┘\n```\n:::\n:::\n\n\nThe `loo_compare()` function orders models from best to worse based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.\nAs one can see, traditional linear models perform terribly.\n\n",
+ "markdown": "# Descriptive Models\n\n![](https://img.shields.io/badge/status-up_to_date-brightgreen)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n::: {#def3e6bc .cell execution_count=2}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n1 1 Speed 0.7 false Low 2 1 Speed 0.392 true Very Low 3 1 Speed 0.46 false Very Low 4 1 Speed 0.455 false Very Low 5 1 Speed 0.505 true Low 6 1 Speed 0.773 false High 7 1 Speed 0.39 false High 8 1 Speed 0.587 true Low 9 1 Speed 0.603 false Low 10 1 Speed 0.435 false High
\n```\n:::\n:::\n\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are going to first take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n::: {#3b99ba16 .cell execution_count=3}\n``` {.julia .cell-code}\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n:::\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n::: {#60c565e5 .cell execution_count=4}\n``` {.julia .cell-code}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n fig = Figure()\n ax = Axis(fig[1, 1], title=title,\n xlabel=\"RT (s)\",\n ylabel=\"Distribution\",\n yticksvisible=false,\n xticksvisible=false,\n yticklabelsvisible=false)\n Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n Makie.axislegend(\"Condition\"; position=:rt)\n Makie.ylims!(ax, (0, nothing))\n return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n \n```\n:::\n:::\n\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which require the inclusion of random effects in the model).\nWe will treat the data as if it was a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$ and a variance $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n- The variance $\\sigma$ (correspondong to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition which is in our case `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n::: {#fdfd5559 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Gaussian(rt; condition=nothing)\n\n # Set priors on variance, intercept and effect of condition\n σ ~ truncated(Normal(0, 0.5); lower=0)\n\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#1e1a3766 .cell execution_count=6}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n σ 0.1652 0.1701\n μ_intercept 0.5071 0.5168\n μ_condition 0.1319 0.1457\n \n```\n:::\n\n:::\n:::\n\n\nThe effect of Condition is significant, people are on average slower (higher RT) when condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n::: {#ba2b1593 .cell execution_count=7}\n``` {.julia .cell-code}\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n:::\n\n\n::: {#f02ce7d4 .cell fig-height='7' fig-width='10' execution_count=8}\n``` {.julia .cell-code}\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=8}\n```{=html}\n \n```\n:::\n:::\n\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response time is more variable). 
\nTypical linear model estimate only one value for sigma $\\sigma$ for the whole model, hence the requirement for **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances accross different values of predictors. \nIt is important in linear models as only one value for sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the variance in the reference condition) and a prior on how much this variance changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a biggger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possible negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at for instance **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nAnd such effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will often happen as the sampling process does explore unlikely regions of the parameter space).\n\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *Truncating* the prior to a lower bound of 0. 
\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *truncating* the prior at a lower bound of 0. \nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept) - and if that wasn't the case, we could have changed the reference factor.\nHowever, this is not good practice, as we would be enforcing a very strong a priori direction of the effect, which is not always justified.\n\n::: {#62ef3b30 .cell execution_count=9}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0) # Same prior as previously\n σ_condition ~ truncated(Normal(0, 0.1); lower=0) # Enforce positivity\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#4b488c39 .cell execution_count=10}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=10}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5081 0.5148\n μ_condition 0.1330 0.1446\n σ_intercept 0.1219 0.1271\n σ_condition 0.0714 0.0810\n \n```\n:::\n\n:::\n:::\n\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition. \n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement: when sigma $\\sigma$ is negative, instead of trying this value and erroring, we return an infinitely low model probability (`-Inf`) to push the sampler away from this impossible region.\n\n::: {#7b17f69f .cell execution_count=11}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! 
-Inf\n return nothing\n end\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#fa8a4426 .cell execution_count=12}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=12}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5076 0.5148\n μ_condition 0.1316 0.1444\n σ_intercept 0.1223 0.1273\n σ_condition 0.0709 0.0803\n \n```\n:::\n\n:::\n:::\n\n\n::: {#ebf2b747 .cell execution_count=13}\n``` {.julia .cell-code}\npred = predict(model_ScaledlGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=13}\n```{=html}\n \n```\n:::\n:::\n\n\n\n\nRelaxing the homoscedasticity assumption is a good step forward, allowing us to draw **richer conclusions** and to better capture the data.\nDespite that, the Gaussian model still seems to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction times (RTs) have traditionally been modeled using linear models and their derived statistical tests, such as *t*-tests and ANOVAs. Importantly, linear models - by definition - try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times (RTs), this is not ideal, as RTs typically exhibit a non-normal distribution, with most of the mass on the left and a long tail towards the right (i.e., a positive skew). This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*.\nHowever, this practice should be avoided, as it leads to various issues, including loss of power and distorted results interpretation [@lo2015transform; @schramm2019reaction].\nInstead of applying arbitrary data transformations, it is better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n
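As a quick illustration of the interpretation issue (a toy example, not drawn from the dataset used in this chapter): back-transforming the mean of log-RTs yields the *geometric* mean, not the arithmetic mean that is usually of interest.\n\n```julia\nusing Statistics\n\nrts = [0.3, 0.4, 0.5, 1.5]  # Toy RTs in seconds\n\nmean(rts)             # 0.675 (arithmetic mean)\nexp(mean(log.(rts)))  # ≈ 0.548 (geometric mean)\n```\n\n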
\n## Shifted LogNormal Model\n\nAn obvious candidate alternative to the log-transformation is to use a model with a Log-transformed Normal distribution.\nA LogNormal distribution is the distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$). \n\nNote that for LogNormal distributions (as is the case for many of the models introduced in the rest of the chapter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for a response must be lower than the fastest response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal, as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychology research has shown that the minimum response time for humans is often between 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times. \n\n::: {#07b852a9 .cell execution_count=14}\n``` {.julia .cell-code}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=14}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Specification\n\n::: {#dbebf70c .cell execution_count=15}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n μ_intercept ~ Normal(0, exp(1)) # On the log-scale: exp(μ) gives the value in seconds\n μ_condition ~ Normal(0, exp(0.3))\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#76460a1b .cell execution_count=16}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=16}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n τ 0.1718 0.1792\n μ_intercept -1.1590 -1.1327\n μ_condition 0.3157 0.3430\n σ_intercept 0.3082 0.3228\n σ_condition 0.0327 0.0508\n \n```\n:::\n\n:::\n:::\n\n\n
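Since $\\mu$ is on the log-scale, a rough way of interpreting the effect of condition is to exponentiate it (an illustrative back-of-the-envelope calculation based on the point estimates above, not an executed cell):\n\n```julia\n# μ_condition ≈ 0.33 (approximate posterior mean from the table above)\nexp(0.33)  # ≈ 1.39: the lognormal part of the RT (above the shift τ)\n           # is about 39% longer in the \"Accuracy\" condition\n```\n\n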
\n::: {#6a414e1f .cell execution_count=17}\n``` {.julia .cell-code}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=17}\n```{=html}\n \n```\n:::\n:::\n\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquitous in nature (and hence used as a good default) is due to the **Central Limit Theorem**, which states that the sum of a large number of independent random variables will be approximately normally distributed. Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of random variables results in a **LogNormal** distribution, and multiplicative (rather than additive) cascades of processes are also very common in nature, from the lengths of latent periods of infectious diseases to the distribution of mineral resources in the Earth's crust, and the elemental mechanisms at stake in physics and cell biology [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified by the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::\n\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of normal and exponential distributions and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian distribution - and *tau* $\\tau$ - the exponential component of the distribution (note that, although denoted by the same letter, it does not correspond directly to a shift of the distribution). \nIntuitively, these parameters reflect the centrality, the width, and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch]. \nHowever, @matzke2009psychological demonstrated that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underlined their value primarily as **descriptive tools**, rather than as models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD. \nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift its centre of mass. \nHence, one should be careful not to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak, and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)\n\n
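This can be checked numerically. The sketch below (assuming the `ExGaussian(μ, σ, τ)` distribution used later in this section supports `rand()`) illustrates that increasing *tau* $\\tau$ alone changes the empirical mean and SD, since for an ExGaussian the mean is $\\mu + \\tau$ and the SD is $\\sqrt{\\sigma^2 + \\tau^2}$:\n\n```julia\nusing Statistics, SequentialSamplingModels\n\n# Same μ and σ, different τ: the empirical mean shifts from ≈0.5 to ≈0.7\nmean(rand(ExGaussian(0.4, 0.05, 0.1), 10_000))  # ≈ 0.4 + 0.1\nmean(rand(ExGaussian(0.4, 0.05, 0.3), 10_000))  # ≈ 0.4 + 0.3\n```\n\n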
### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameter, including the exponential component *tau* $\\tau$.\nAll we need is to set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$. \n\n::: {#a7089bbe .cell execution_count=18}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ExGaussian(rt; condition=nothing)\n\n # Priors \n μ_intercept ~ Normal(0, 1) \n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n τ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ <= 0 # Avoid negative tau values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ExGaussian(μ, σ, τ)\n end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#bf20b174 .cell execution_count=19}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=19}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.3999 0.4062\n μ_condition 0.0618 0.0721\n σ_intercept 0.0381 0.0432\n σ_condition 0.0104 0.0185\n τ_intercept 0.1052 0.1130\n τ_condition 0.0641 0.0795\n \n```\n:::\n\n:::\n:::\n\n\n::: {#d4d95c07 .cell execution_count=20}\n``` {.julia .cell-code}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by ExGaussian Model\")\nfor i in 1:length(chain_ExGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=20}\n```{=html}\n \n```\n:::\n:::\n\n\nThe ExGaussian model also provides an excellent fit to the data. \nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n
\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convolved with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variants, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), differs from the previous distributions in that it is not characterized by \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is more complex but, as we will see below, offers a window onto a new level of interpretation.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::\n\n
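The moments can nonetheless be derived from the parameters jointly: for a Shifted Wald, the mean RT is $\\alpha / \\nu + \\tau$. The sketch below (assuming the `Wald(ν, α, τ)` distribution used below supports `rand()`; the values are loosely based on the estimates reported further down) illustrates this:\n\n```julia\nusing Statistics, SequentialSamplingModels\n\n# Mean RT = threshold / drift + shift: requiring more evidence (higher α)\n# or accumulating it more slowly (lower ν) produces slower responses\nmean(rand(Wald(5.0, 1.7, 0.18), 10_000))  # ≈ 1.7 / 5.0 + 0.18 = 0.52\n```\n\n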
### Model Specification\n\n::: {#4df349b0 .cell execution_count=21}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n ν_intercept ~ truncated(Normal(1, 3); lower=0)\n ν_condition ~ Normal(0, 1)\n\n α_intercept ~ truncated(Normal(0, 1); lower=0)\n α_condition ~ Normal(0, 0.5)\n\n τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n τ_condition ~ Normal(0, 0.01)\n\n for i in 1:length(rt)\n ν = ν_intercept + ν_condition * condition[i]\n if ν <= 0 # Avoid negative drift values\n Turing.@addlogprob! -Inf\n return nothing\n end\n α = α_intercept + α_condition * condition[i]\n if α <= 0 # Avoid negative threshold values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ < 0 # Avoid negative tau values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ Wald(ν, α, τ)\n end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n:::\n\n\n::: {#b9814b26 .cell execution_count=22}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_Wald; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=22}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n ν_intercept 5.0986 5.3197\n ν_condition -1.3387 -1.0493\n α_intercept 1.6605 1.7456\n α_condition 0.2060 0.3437\n τ_intercept 0.1808 0.1870\n τ_condition -0.0371 -0.0231\n \n```\n:::\n\n:::\n:::\n\n\n::: {#cf9d1165 .cell execution_count=23}\n``` {.julia .cell-code}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=23}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Comparison\n\nAt this stage, given the multiple options available to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n::: {#398a7d32 .cell execution_count=24}\n``` {.julia .cell-code}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledlGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n Gaussian = loo_Gaussian, \n ScaledGaussian = loo_ScaledGaussian, \n LogNormal = loo_LogNormal, \n ExGaussian = loo_ExGaussian, \n Wald = loo_Wald))\n```\n\n::: {.cell-output .cell-output-display execution_count=24}\n```\n\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\n┌────────────────┬──────────┬────────┬────────┐\n│ │ cv_elpd │ cv_avg │ weight │\n├────────────────┼──────────┼────────┼────────┤\n│ ExGaussian │ 0.00 │ 0.00 │ 1.00 │\n│ LogNormal │ -322.27 │ -0.03 │ 0.00 │\n│ Wald │ -379.85 │ -0.04 │ 0.00 │\n│ ScaledGaussian │ -2465.97 │ -0.26 │ 0.00 │\n│ Gaussian │ -2974.49 │ -0.31 │ 0.00 │\n└────────────────┴──────────┴────────┴────────┘\n```\n:::\n:::\n\n\nThe `loo_compare()` function orders models from best to worst based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.\nAs one can see, traditional linear models perform terribly.\n\n
\n## Other Models\n\nOther models are available to fit RT data, which we will demonstrate below for reference purposes.\nHowever, we won't be explaining them here, as we will revisit them in the next chapter in the context of choice modeling.\n\n### Linear Ballistic Accumulator (LBA)\n\nTODO.\n\n### Leaky Competing Accumulator (LCA)\n\nTODO.\n\n### Racing Diffusion Model (RDM)\n\nTODO.\n\n",
"supporting": [
"4a_rt_descriptive_files\\figure-html"
],
diff --git a/content/.quarto/cites/index.json b/content/.quarto/cites/index.json
index 50ea669..f6889b0 100644
--- a/content/.quarto/cites/index.json
+++ b/content/.quarto/cites/index.json
@@ -1 +1 @@
-{"4a_rt_descriptive.qmd":["wagenmakers2008diffusion","heathcote2012linear","theriault2024check","lo2015transform","schramm2019reaction","wagenmakers2005relation","limpert2001log","balota2011moving","matzke2009psychological","hohle1965inferred","kieffaber2006switch","matzke2009psychological","schwarz2001ex","heathcote2004fitting","anders2016shifted"],"5_individual.qmd":[],"2_predictors.qmd":[],"3_scales.qmd":[],"references.qmd":[],"1_introduction.qmd":[],"index.qmd":[],"4b_rt_generative.qmd":[],"4_rt.qmd":["wagenmakers2008diffusion","heathcote2012linear","theriault2024check","lo2015transform","schramm2019reaction","balota2011moving","matzke2009psychological","hohle1965inferred","kieffaber2006switch","matzke2009psychological","schwarz2001ex","heathcote2004fitting","anders2016shifted"],"4_1_Normal.qmd":["wagenmakers2008diffusion","theriault2024check","lo2015transform","schramm2019reaction"]}
+{"4_rt.qmd":["wagenmakers2008diffusion","heathcote2012linear","theriault2024check","lo2015transform","schramm2019reaction","balota2011moving","matzke2009psychological","hohle1965inferred","kieffaber2006switch","matzke2009psychological","schwarz2001ex","heathcote2004fitting","anders2016shifted"],"2_predictors.qmd":[],"index.qmd":[],"5_individual.qmd":[],"4a_rt_descriptive.qmd":["wagenmakers2008diffusion","heathcote2012linear","theriault2024check","lo2015transform","schramm2019reaction","wagenmakers2005relation","limpert2001log","balota2011moving","matzke2009psychological","hohle1965inferred","kieffaber2006switch","matzke2009psychological","schwarz2001ex","heathcote2004fitting","anders2016shifted"],"references.qmd":[],"4b_rt_generative.qmd":[],"1_introduction.qmd":[],"4_1_Normal.qmd":["wagenmakers2008diffusion","theriault2024check","lo2015transform","schramm2019reaction"],"3_scales.qmd":[]}
diff --git a/content/.quarto/idx/1_introduction.qmd.json b/content/.quarto/idx/1_introduction.qmd.json
index cc89c2c..6491d54 100644
--- a/content/.quarto/idx/1_introduction.qmd.json
+++ b/content/.quarto/idx/1_introduction.qmd.json
@@ -1 +1 @@
-{"title":"Fundamentals of Bayesian Modeling in Julia","markdown":{"headingText":"Fundamentals of Bayesian Modeling in Julia","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Very quick intro to Julia and Turing\n\nGoal is to teach just enough so that the reader understands the code.\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions makes a clear distinction between positional and keyword arguments, and both are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These **symbols** are like character strings that are not manipulable (there are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to have it apply element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of its vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a band `!` is used. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (it is equivalent to `x = push(x, 2)`).\n:::\n\n\n### Generate Data from Normal Distribution\n\n```{julia}\n#| output: false\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n\n```{julia}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n### Recover Distribution Parameters with Turing\n\n```{julia}\n@model function model_gaussian(x)\n # Priors\n μ ~ Uniform(0, 200)\n σ ~ Uniform(0, 30)\n\n # Check against each datapoint\n for i in 1:length(x)\n x[i] ~ Normal(μ, σ)\n end\nend\n\nmodel = model_gaussian(iq)\nsampling_results = sample(model, NUTS(), 400)\n\n# Summary (95% CI)\nsummarystats(sampling_results)\n```\n\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Boostrapping\n\nIntroduce concepts related to pseudo-posterior distribution description\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, how to leverage them to model interindividual differences\n\n## Bayesian estimation\n\nintroduce Bayesian estimation and priors over parameters\n\n## Bayesian mixed linear regression\n\nput everything 
together\n","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"jupyter"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"1_introduction.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to 
search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
+{"title":"Fundamentals of Bayesian Modeling in Julia","markdown":{"headingText":"Fundamentals of Bayesian Modeling in Julia","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Brief Intro to Julia and Turing\n\nGoal is to teach just enough so that the reader understands the code. \nWe won't be discussing things like plotting (as it highly depends on the package used).\n\n### Installing Julia and Packages\n\nTODO.\n\n\n### Julia Basics\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions makes a clear distinction between positional and keyword arguments, and both are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These *symbols* are like character strings that are not manipulable (there are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to have it apply element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of its vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a band `!` is used. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (it is equivalent to `x = push(x, 2)`).\n- **Macros**: Some functions start with `@`. These are called macros and are used to manipulate the code before it is run. For example, `@time` will measure the time it takes to run the code that follows.\n- **Unicode**: Julia is a modern language to supports unicode characters, which are used a lot for mathematical operations. 
\n### Generate Data from Normal Distribution\n\n```{julia}\n#| output: false\n#| code-fold: false\n\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n\n```{julia}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n### Recover Distribution Parameters with Turing\n\n```{julia}\n#| output: false\n#| code-fold: false\n\n@model function model_gaussian(x)\n # Priors\n μ ~ Uniform(0, 200)\n σ ~ Uniform(0, 30)\n\n # Check against each datapoint\n for i in 1:length(x)\n x[i] ~ Normal(μ, σ)\n end\nend\n\nfit_gaussian = model_gaussian(iq)\nchain_gaussian = sample(fit_gaussian, NUTS(), 400)\n```\n\nInspecting the chain variable will show various posterior statistics (including the mean, standard deviation, and diagnostic indices).\n\n```{julia}\n#| code-fold: false\n\nchain_gaussian\n```\n\nFor the purpose of this book, we will mostly focus on the 95% Credible Interval (CI), and we will assume that a parameter is ***\"significant\"*** if its CI does not include 0.\n\n```{julia}\n#| code-fold: false\n\n# Summary (95% CI)\nhpd(chain_gaussian)\n```\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Bootstrapping\n\nIntroduce concepts related to pseudo-posterior distribution description\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, how to leverage them to model interindividual differences\n\n## Bayesian estimation\n\nintroduce Bayesian estimation and priors over parameters\n\n## Bayesian mixed linear regression\n\nput everything together\n","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"jupyter"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"1_introduction.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article 
Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading 
Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
diff --git a/content/.quarto/idx/4a_rt_descriptive.qmd.json b/content/.quarto/idx/4a_rt_descriptive.qmd.json
index 3bf38d0..9f387e5 100644
--- a/content/.quarto/idx/4a_rt_descriptive.qmd.json
+++ b/content/.quarto/idx/4a_rt_descriptive.qmd.json
@@ -1 +1 @@
-{"title":"Descriptive Models","markdown":{"headingText":"Descriptive Models","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-up_to_date-green)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n```{julia}\n#| code-fold: false\n\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are going to first take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n```{julia}\n#| output: false\n\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n```{julia}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n fig = Figure()\n ax = Axis(fig[1, 1], title=title,\n xlabel=\"RT (s)\",\n ylabel=\"Distribution\",\n yticksvisible=false,\n xticksvisible=false,\n yticklabelsvisible=false)\n Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n Makie.axislegend(\"Condition\"; position=:rt)\n Makie.ylims!(ax, (0, nothing))\n return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n```\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which require the inclusion of random effects in the model).\nWe will treat the data as if it was a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$ and a variance $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n- The variance $\\sigma$ (correspondong to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition which is in our case `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_Gaussian(rt; condition=nothing)\n\n # Set priors on variance, intercept and effect of condition\n σ ~ truncated(Normal(0, 0.5); lower=0)\n\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n\nThe effect of Condition is significant, people are on average slower (higher RT) when condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n```{julia}\n#| output: false\n\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n\n```{julia}\n#| fig-width: 10\n#| fig-height: 7\n\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response time is more variable). \nTypical linear model estimate only one value for sigma $\\sigma$ for the whole model, hence the requirement for **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances accross different values of predictors. \nIt is important in linear models as only one value for sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? 
In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the variance in the reference condition) and a prior on how much this variance changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a biggger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possible negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at for instance **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nAnd such effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will often happen as the sampling process does explore unlikely regions of the parameter space).\n\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *Truncating* the prior to a lower bound of 0. \nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept) - and if it wasn't the case, we could have changed the reference factor.\nHowever, this is not a good practice as we are enforcing a very strong a priori specific direction of the effect, which is not always justified.\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0) # Same prior as previously\n σ_condition ~ truncated(Normal(0, 0.1); lower=0) # Enforce positivity\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition. 
\n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement when sigma $\\sigma$ is negative to avoid trying this value and erroring, and instead returning an infinitely low model probability (`-Inf`) to push away the exploration of this impossible region.\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_ScaledlGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n\n\n\n\nAlthough relaxing the homoscedasticity assumption is a good step forward, allowing us to make **richer conclusions** and better capturing the data.\nDespite that, the Gaussian model stil seem to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction time (RTs) have been traditionally modeled using traditional linear models and their derived statistical tests such as *t*-test and ANOVAs. Importantly, linear models - by definition - will try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times (RTs), this is not ideal, as RTs typically exhibit a non-normal distribution, skewed towards the left with a long tail towards the right. This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*. 
\nHowever, this practice should be avoided, as it leads to various issues, including loss of power and distorted interpretation of the results [@lo2015transform; @schramm2019reaction].\nRather than applying an arbitrary data transformation, it is better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n\n## Shifted LogNormal Model\n\nOne obvious candidate alternative to the log-transformation is to use a model based on a LogNormal distribution.\nA LogNormal distribution is the distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$). \n\nNote that for LogNormal distributions (as is the case for many of the models introduced in the rest of the chapter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for a response must be lower than the fastest response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal, as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychology research has shown that the minimum response time for humans is often between 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times.\n\n```{julia}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors\n    τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n    μ_intercept ~ Normal(0, exp(1))  # On the log-scale: exp(μ) to get value in seconds\n    μ_condition ~ Normal(0, exp(0.3))\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0  # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n    end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n\n### Interpretation\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n\n```{julia}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquitous in nature (and hence used as a good default) is the **Central Limit Theorem**, which states that the sum of a large number of independent random variables is approximately normally distributed. Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of random variables results in a **LogNormal** distribution, and multiplicative (rather than additive) cascades of processes are also very common in nature, from the lengths of latent periods of infectious diseases to the distribution of mineral resources in the Earth's crust, and the elemental mechanisms at stake in physics and cell biology [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified with the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::\n\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of a normal and an exponential distribution, and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian distribution - and *tau* $\\tau$ - the exponential component of the distribution (note that, although denoted by the same letter, it does not correspond directly to a shift of the distribution). \nIntuitively, these parameters reflect the centrality, the width, and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch].
\nHowever, @matzke2009psychological demonstrate that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underline their value primarily as **descriptive tools**, rather than models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD. \nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift its centre of mass. \nHence, one should be careful not to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak, and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)\n\n### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameter, including the exponential component *tau* $\\tau$.\nAll we need is to set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$. \n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ExGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ Normal(0, 1)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    τ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0  # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ <= 0  # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ExGaussian(μ, σ, τ)\n    end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n\n### Interpretation\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by ExGaussian Model\")\nfor i in 1:length(chain_ExGaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\nThe ExGaussian model also provides an excellent fit to the data.
\nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convolved with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variants, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), is different from the previous distributions in that it is not characterized by \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is more complex but, as we will see below, offers a window to a new level of interpretation.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors\n    ν_intercept ~ truncated(Normal(1, 3); lower=0)\n    ν_condition ~ Normal(0, 1)\n\n    α_intercept ~ truncated(Normal(0, 1); lower=0)\n    α_condition ~ Normal(0, 0.5)\n\n    τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n    τ_condition ~ Normal(0, 0.01)\n\n    for i in 1:length(rt)\n        ν = ν_intercept + ν_condition * condition[i]\n        if ν <= 0  # Avoid negative drift values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        α = α_intercept + α_condition * condition[i]\n        if α <= 0  # Avoid negative threshold values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ < 0  # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ Wald(ν, α, τ)\n    end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_Wald; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n### Model Comparison\n\nAt this stage, given the multiple options available to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, which is a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n```{julia}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n    Gaussian = loo_Gaussian, \n    ScaledGaussian = loo_ScaledGaussian, \n    LogNormal = loo_LogNormal, \n    ExGaussian = loo_ExGaussian, \n    Wald = loo_Wald))\n```\n\nThe `loo_compare()` function orders models from best to worst based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.\nAs one can see, traditional linear models perform terribly.\n\n","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"jupyter"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"4a_rt_descriptive.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev 
Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File 
Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
+{"title":"Descriptive Models","markdown":{"headingText":"Descriptive Models","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-up_to_date-brightgreen)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n```{julia}\n#| code-fold: false\n\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are going to first take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n```{julia}\n#| output: false\n\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n```{julia}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n fig = Figure()\n ax = Axis(fig[1, 1], title=title,\n xlabel=\"RT (s)\",\n ylabel=\"Distribution\",\n yticksvisible=false,\n xticksvisible=false,\n yticklabelsvisible=false)\n Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n Makie.axislegend(\"Condition\"; position=:rt)\n Makie.ylims!(ax, (0, nothing))\n return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n```\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which require the inclusion of random effects in the model).\nWe will treat the data as if it was a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$, and a standard deviation $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n\n- The standard deviation $\\sigma$ (corresponding to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition, which in our case is `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_Gaussian(rt; condition=nothing)\n\n    # Set priors on sigma, intercept and effect of condition\n    σ ~ truncated(Normal(0, 0.5); lower=0)\n\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n\nThe effect of Condition is significant: people are on average slower (higher RTs) when the condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n```{julia}\n#| output: false\n\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n\n```{julia}\n#| fig-width: 10\n#| fig-height: 7\n\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response times are more variable). \nTypical linear models estimate only one value of sigma $\\sigma$ for the whole model, hence the requirement for **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances across different values of the predictors. \nIt is important in linear models, as only one value of sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the spread in the reference condition) and a prior on how much this spread changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a bigger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possibly negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at, for instance, **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nSuch an effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will often happen, as the sampling process does explore unlikely regions of the parameter space).
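\n\nTo see the problem concretely, here is a minimal sketch (outside of any model) of what happens when such a negative sigma $\\sigma$ reaches the `Normal` distribution constructor (from the `Distributions` package loaded above):\n\n```{julia}\n#| code-fold: false\n#| error: true\n\nσ = 0.14 - 0.15  # A \"too negative\" effect of condition\nNormal(0.3, σ)   # Errors: Normal requires σ >= 0\n```\n\nThis is exactly the error that the sampler would trigger when it wanders into that region.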
\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *truncating* the prior to a lower bound of 0. \nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept); if that were not the case, we could have changed the reference level.\nHowever, this is not a good practice, as we are enforcing a very strong a priori assumption about the direction of the effect, which is not always justified.\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ScaledGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)  # Same prior as previously\n    σ_condition ~ truncated(Normal(0, 0.1); lower=0)  # Enforce positivity\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_ScaledGaussian = model_ScaledGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledGaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition.
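\nFor instance, we can sketch the sigma $\\sigma$ implied in each condition using the posterior means of the two parameters (a rough point summary, not a full posterior analysis):\n\n```{julia}\n#| code-fold: false\n\n# Posterior mean of sigma in each condition (sketch)\nσ_speed = mean(chain_ScaledGaussian[:σ_intercept])\nσ_accuracy = σ_speed + mean(chain_ScaledGaussian[:σ_condition])\n(σ_speed, σ_accuracy)\n```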
\n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement: when sigma $\\sigma$ is negative, instead of erroring, the model returns an infinitely low log-probability (`-Inf`), which pushes the sampler away from this impossible region.\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ScaledGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0  # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_ScaledGaussian = model_ScaledGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledGaussian, NUTS(), 400)\n```\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_ScaledGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\nRelaxing the homoscedasticity assumption is a good step forward, allowing us to draw **richer conclusions** and to better capture the data.\nDespite that, the Gaussian model still seems to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction times (RTs) have traditionally been modeled using linear models and their derived statistical tests, such as *t*-tests and ANOVAs. Importantly, linear models - by definition - will try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times (RTs), this is not ideal, as RTs typically exhibit a non-normal distribution, with most of the mass on the left and a long tail towards the right (i.e., a right-skewed distribution). This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*. \nHowever, this practice should be avoided, as it leads to various issues, including loss of power and distorted interpretation of the results [@lo2015transform; @schramm2019reaction].\nRather than applying an arbitrary data transformation, it is better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n\n## Shifted LogNormal Model\n\nOne obvious candidate alternative to the log-transformation is to use a model based on a LogNormal distribution.\nA LogNormal distribution is the distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$).
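\nFor example (with a made-up value, not an estimate from our data), an effect of $\\mu_{condition} = 0.3$ on the log-scale would multiply RTs by $\\exp(0.3) \\approx 1.35$ in that condition:\n\n```{julia}\n#| code-fold: false\n\nμ_condition = 0.3  # Hypothetical effect on the log-scale\nexp(μ_condition)   # Multiplicative effect on the RT scale (≈ 1.35)\n```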
\n\nNote that for LogNormal distributions (as is the case for many of the models introduced in the rest of the chapter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for a response must be lower than the fastest response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal, as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychology research has shown that the minimum response time for humans is often between 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times.\n\n```{julia}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors\n    τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n    μ_intercept ~ Normal(0, exp(1))  # On the log-scale: exp(μ) to get value in seconds\n    μ_condition ~ Normal(0, exp(0.3))\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0  # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n    end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n\n### Interpretation\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n\n```{julia}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquitous in nature (and hence used as a good default) is the **Central Limit Theorem**, which states that the sum of a large number of independent random variables is approximately normally distributed. Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of random variables results in a **LogNormal** distribution, and multiplicative (rather than additive) cascades of processes are also very common in nature, from the lengths of latent periods of infectious diseases to the distribution of mineral resources in the Earth's crust, and the elemental mechanisms at stake in physics and cell biology [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified with the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::\n\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of a normal and an exponential distribution, and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian distribution - and *tau* $\\tau$ - the exponential component of the distribution (note that, although denoted by the same letter, it does not correspond directly to a shift of the distribution). \nIntuitively, these parameters reflect the centrality, the width, and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch].
\nHowever, @matzke2009psychological demonstrate that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underline their value primarily as **descriptive tools**, rather than models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD. \nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift its centre of mass. \nHence, one should be careful not to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak, and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)
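\n\nWe can verify this numerically with a quick sketch, drawing samples from two distributions that differ only in their *tau* $\\tau$ parameter (assuming that `ExGaussian`, from `SequentialSamplingModels` loaded above, supports `rand` like other distributions):\n\n```{julia}\n#| code-fold: false\n\n# Same mu and sigma, but a larger tau: the empirical mean shifts to the right\nrts1 = rand(ExGaussian(0.4, 0.05, 0.05), 10_000)\nrts2 = rand(ExGaussian(0.4, 0.05, 0.20), 10_000)\n(mean(rts1), mean(rts2))\n```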
\n\n### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameter, including the exponential component *tau* $\\tau$.\nAll we need is to set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$. \n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_ExGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ Normal(0, 1)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    τ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0  # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ <= 0  # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ExGaussian(μ, σ, τ)\n    end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n\n### Interpretation\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by ExGaussian Model\")\nfor i in 1:length(chain_ExGaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\nThe ExGaussian model also provides an excellent fit to the data. \nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convolved with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variants, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), is different from the previous distributions in that it is not characterized by \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is more complex but, as we will see below, offers a window to a new level of interpretation.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::\n\n### Model Specification\n\n```{julia}\n#| code-fold: false\n#| output: false\n\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors\n    ν_intercept ~ truncated(Normal(1, 3); lower=0)\n    ν_condition ~ Normal(0, 1)\n\n    α_intercept ~ truncated(Normal(0, 1); lower=0)\n    α_condition ~ Normal(0, 0.5)\n\n    τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n    τ_condition ~ Normal(0, 0.01)\n\n    for i in 1:length(rt)\n        ν = ν_intercept + ν_condition * condition[i]\n        if ν <= 0  # Avoid negative drift values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        α = α_intercept + α_condition * condition[i]\n        if α <= 0  # Avoid negative threshold values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ < 0  # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ Wald(ν, α, τ)\n    end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n\n```{julia}\n#| code-fold: false\n\nhpd(chain_Wald; alpha=0.05)\n```\n\n```{julia}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n### Model Comparison\n\nAt this stage, given the multiple options available to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, which is a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n```{julia}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n    Gaussian = loo_Gaussian, \n    ScaledGaussian = loo_ScaledGaussian, \n    LogNormal = loo_LogNormal, \n    ExGaussian = loo_ExGaussian, \n    Wald = loo_Wald))\n```\n\nThe `loo_compare()` function orders models from best to worst based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.\nAs one can see, traditional linear models perform terribly.\n\n\n## Other Models\n\nOther models are available to fit RT data, which we will demonstrate below for reference purposes.\nHowever, we won't explain them here, as we will revisit them in the next chapter in the context of choice modeling.\n\n### Linear Ballistic Accumulator (LBA)\n\nTODO.\n\n### Leaky Competing Accumulator (LCA)\n\nTODO.\n\n### Racing Diffusion Model (RDM)\n\nTODO.
","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"jupyter"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"4a_rt_descriptive.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to 
search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
diff --git a/content/.quarto/idx/4b_rt_generative.qmd.json b/content/.quarto/idx/4b_rt_generative.qmd.json
index 4e50dcd..f2fc401 100644
--- a/content/.quarto/idx/4b_rt_generative.qmd.json
+++ b/content/.quarto/idx/4b_rt_generative.qmd.json
@@ -1 +1 @@
-{"title":"Generative Models","markdown":{"headingText":"Generative Models","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-WIP-orange)\n\nIn this chapter, we will move away from statistical models that **describe** the data to models of **data-generation processes**.\n\n## Evidence Accumulation\n\nIn the previous chapter, we introduced the **Wald** distribution and its parameters, *nu* $\\nu$ (drift rate) and *alpha* $\\alpha$ (threshold).\nThis distribution appears to have been first derived in 1900 to model **the time a stock reaches a certain price** (a *threshold* price) for the first time, and used in 1915 by Schrödinger as the time to first passage of a threshold of a Brownian motion (i.e., a random walk).\n\nA random walk describes a path consisting of a succession of random steps. It has been used by Francis Galton in 1894 to illustrate the *Central Limit Theorem*, and is now known as the **Galton Board**. The Galton Board is a physical model of a random walk, where balls are dropped from the top and bounce left or right at each peg until they reach the bottom. The distribution of the final position of the balls is a normal distribution.\n\n![](media/rt_galtonboard.gif)\n\n::: {.callout-caution}\nTODO: Replace with my own video.\n:::\n\nIn the figure below, we can see a computer simulation illustrating the same concept, with \"time\" being displayed on the x-axis. All iterations start at *y*=0, and change by -1 or +1 at each time step, until it reaches a threshold of *t* = 0.7 seconds.\n\n![](media/rt_randomwalk.gif)\n\n\nRandom walks are used to model a wide range of phenomena, such as the movement of particules and molecules, the stock market, the behavior of animals and, crucially, **cognitive processes**.\nFor instance, it can be used to approximate **evidence accumulation**: the idea that a decision maker (be it a Human or any other system) accumulates evidence in a \"stochastic\" (i.e., random) fashion over time until a certain threshold is reached, at which point a decision is made.\n\n## Wald Distribution (Revisited)\n\nThis is how the Wald distribution is actually **generated**. It corresponds to the distribution of times that it takes for a stochastic process to reach a certain **threshold** $\\alpha$ (a certain amount of \"evidence\").\nThe twist is that the process underlying this model is a random walk with a **drift rate** $\\nu$, which corresponds to the average amount of evidence accumulated per unit of time. \nIn other words, the **drift rate** $\\nu$ is the \"slope\" of the evidence accumulation process, representing the **strength of the evidence** (or the **speed** by which the decision maker accumulates evidence).\nThe **threshold** $\\alpha$ is the amount of evidence required to reach a decision (\"decision\" typically meaning making a response).\n\n![](media/rt_wald2.gif)\n\n> In this figure, the red line at 0 represents the non-decision time *tau* $\\tau$. The dotted red line corresponds to the *threshold* $\\alpha$, and the *drift rate* $\\nu$ is the slope of the black line. The time at which each individual random accumulator crosses the threshold forms a Wald distribution.\n\nAs you can see, the Wald distribution belongs to a family of models thata do not merely attempt at describing the empirical distributions by tweaking and convolving distributions (like the ExGaussian or LogNormal models). Instead, their parameters are characterizing the **data generation process**. 
\n\n::: {.callout-important}\n\nWhile such \"generative\" models offer potential insights into the cognitive processes underlying the data, they inherently make **strong assumptions** about said underlying process (for instance, that the data of a task can be approximated by a stochastic evidence accumulation process). It is thus crucial to always keep in mind the limitations and assumptions of the models we use. Don't forget, **with great power comes great responsibility.**\n:::\n\n\n## Drift Diffusion Model (DDM)\n\nWe will use the DDM as a case study to introduce generative models.\n\n- [**Drift Diffusion Model (DDM) in R: A Tutorial**](https://dominiquemakowski.github.io/easyRT/articles/ddm.html)\n\n## Other Models (LBA, LNR)\n\n## Including Random Effects\n\nTODO.\n\n## Additional Resources\n\n- [**Lindelov's overview of RT models**](https://lindeloev.github.io/shiny-rt/): An absolute must-read.\n- [**De Boeck & Jeon (2019)**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.00102/full): A paper providing an overview of RT models.\n- [https://github.com/vasishth/bayescogsci](https://github.com/vasishth/bayescogsci)\n","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"markdown"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"4b_rt_generative.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX 
citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
+{"title":"Generative Models","markdown":{"headingText":"Generative Models","containsRefs":false,"markdown":"\n![](https://img.shields.io/badge/status-WIP-orange)\n\nIn this chapter, we will move away from statistical models that **describe** the data to models of **data-generation processes**.\n\n## Evidence Accumulation\n\nIn the previous chapter, we introduced the **Wald** distribution and its parameters, *nu* $\\nu$ (drift rate) and *alpha* $\\alpha$ (threshold).\nThis distribution appears to have been first derived in 1900 to model **the time a stock reaches a certain price** (a *threshold* price) for the first time, and used in 1915 by Schrödinger as the time to first passage of a threshold of a Brownian motion (i.e., a random walk).\n\nA random walk describes a path consisting of a succession of random steps. It has been used by Francis Galton in 1894 to illustrate the *Central Limit Theorem*, and is now known as the **Galton Board**. The Galton Board is a physical model of a random walk, where balls are dropped from the top and bounce left or right at each peg until they reach the bottom. The distribution of the final position of the balls is a normal distribution.\n\n![](media/rt_galtonboard.gif)\n\n::: {.callout-caution}\nTODO: Replace with my own video.\n:::\n\nIn the figure below, we can see a computer simulation illustrating the same concept, with \"time\" being displayed on the x-axis. All iterations start at *y*=0, and change by -1 or +1 at each time step, until it reaches a threshold of *t* = 0.7 seconds.\n\n![](media/rt_randomwalk.gif)\n\n\nRandom walks are used to model a wide range of phenomena, such as the movement of particules and molecules, the stock market, the behavior of animals and, crucially, **cognitive processes**.\nFor instance, it can be used to approximate **evidence accumulation**: the idea that a decision maker (be it a Human or any other system) accumulates evidence in a \"stochastic\" (i.e., random) fashion over time until a certain threshold is reached, at which point a decision is made.\n\n## Wald Distribution (Revisited)\n\nThis is how the Wald distribution is actually **generated**. It corresponds to the distribution of times that it takes for a stochastic process to reach a certain **threshold** $\\alpha$ (a certain amount of \"evidence\").\nThe twist is that the process underlying this model is a random walk with a **drift rate** $\\nu$, which corresponds to the average amount of evidence accumulated per unit of time. \nIn other words, the **drift rate** $\\nu$ is the \"slope\" of the evidence accumulation process, representing the **strength of the evidence** (or the **speed** by which the decision maker accumulates evidence).\nThe **threshold** $\\alpha$ is the amount of evidence required to reach a decision (\"decision\" typically meaning making a response).\n\n![](media/rt_wald2.gif)\n\n> In this figure, the red line at 0 represents the non-decision time *tau* $\\tau$. The dotted red line corresponds to the *threshold* $\\alpha$, and the *drift rate* $\\nu$ is the slope of the black line. The time at which each individual random accumulator crosses the threshold forms a Wald distribution.\n\nAs you can see, the Wald distribution belongs to a family of models thata do not merely attempt at describing the empirical distributions by tweaking and convolving distributions (like the ExGaussian or LogNormal models). Instead, their parameters are characterizing the **data generation process**. 
\n\n::: {.callout-important}\n\nWhile such \"generative\" models offer potential insights into the cognitive processes underlying the data, they inherently make **strong assumptions** about said underlying process (for instance, that the data of a task can be approximated by a stochastic evidence accumulation process). It is thus crucial to always keep in mind the limitations and assumptions of the models we use. Don't forget, **with great power comes great responsability.**\n:::\n\n\n## Drift Diffusion Model (DDM)\n\nInterestingly, the **Wald** model is actually a special case of a more general type called the **Drift Diffusion Model (DDM)** (named as such because the evidence accumulation is assumed to be a \"diffusion\" process, i.e., a random walk). \nOne of the main difference is that in the Wald model, the drift rate $\\nu$ must be *positive*, as it tracks the time taken by the diffusion processes to reach only one \"positive\" threshold $\\alpha$.\n\nBut what happens if we relax this and allow the drift rate to be null or negative? Many traces might never reach the upper threshold, and might instead reach high \"negative\" values.\n\nDrift Diffusion Models are useful to **jointly model RTs and a binary outcome**, such as 2 different choices or accuracy (i.e., \"correct\" vs. \"error\").\n\n![](media/rt_ddm.gif)\n\n\nThe parameters are:\n\n- **Nu** $\\nu$ : The drift rate (also sometimes denoted *delta* $\\delta$), representing the average slope of the accumulation process towards the boundaries. The larger the (absolute value of the) drift rate, the more effective the evidence accumulation for the corresponding response option. A drift rate close to 0 suggests an ambiguous stimulus. Typical range: [-5, 5].\n- **Alpha** $\\alpha$ : The boundary separation threshold is the distance between the two decision bounds (lower bound being at 0 and upper bound at *alpha* $\\alpha$). It has been interpreted as a measure of response caution (i.e., of speed-accuracy trade-off, with high *alpha* $\\alpha$ being related to high accuracy). It represents the amount of evidence that is needed to make a response. Typical range: [0.5, 2].\n- **Beta** $\\beta$ : The initial bias towards any of the responses. The starting point of the accumulation process (in percentage of *alpha* $\\alpha$: if $\\alpha = 2.0$ and $\\beta = 0.5$, then the actual starting point is $2.0*0.5=1$). Typical range: [0.3, 0.7].\n- **Tau** $\\tau$ : The non-decision time. It represents all non-decisional process, such as stimulus encoding, motor processes, etc. 
Typical range (in seconds): [0.1, 0.5].\n\n\nThis basic model is sometimes referred to as a Wiener model, as expanded versions of the DDM exist with additional parameters (e.g., variability of the drift rate).\n\n\n## Linear Ballistic Accumulator (LBA)\n\nTODO.\n\n## Other Models (LNR, RDM)\n\nTODO.\n\n## Including Random Effects\n\n### Random Intercept\n\nTODO.\n\n### Random Slopes\n\nTODO.\n\n### Performance Tips\n\nTODO.\n\n## Additional Resources\n\n- [**Lindelov's overview of RT models**](https://lindeloev.github.io/shiny-rt/): An absolute must-read.\n- [**De Boeck & Jeon (2019)**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.00102/full): A paper providing an overview of RT models.\n- [https://github.com/vasishth/bayescogsci](https://github.com/vasishth/bayescogsci)\n","srcMarkdownNoYaml":""},"formats":{"html":{"identifier":{"display-name":"HTML","target-format":"html","base-format":"html"},"execute":{"fig-width":7,"fig-height":5,"fig-format":"retina","fig-dpi":96,"df-print":"default","error":false,"eval":true,"cache":true,"freeze":"auto","echo":true,"output":true,"warning":true,"include":true,"keep-md":false,"keep-ipynb":false,"ipynb":null,"enabled":null,"daemon":null,"daemon-restart":false,"debug":false,"ipynb-filters":[],"ipynb-shell-interactivity":null,"plotly-connected":true,"execute":true,"engine":"markdown"},"render":{"keep-tex":false,"keep-typ":false,"keep-source":false,"keep-hidden":false,"prefer-html":false,"output-divs":true,"output-ext":"html","fig-align":"default","fig-pos":null,"fig-env":null,"code-fold":true,"code-overflow":"scroll","code-link":false,"code-line-numbers":false,"code-tools":false,"tbl-colwidths":"auto","merge-includes":true,"inline-includes":false,"preserve-yaml":false,"latex-auto-mk":true,"latex-auto-install":true,"latex-clean":true,"latex-min-runs":1,"latex-max-runs":10,"latex-makeindex":"makeindex","latex-makeindex-opts":[],"latex-tlmgr-opts":[],"latex-input-paths":[],"latex-output-dir":null,"link-external-icon":false,"link-external-newwindow":false,"self-contained-math":false,"format-resources":[],"notebook-links":true},"pandoc":{"standalone":true,"wrap":"none","default-image-extension":"png","to":"html","output-file":"4b_rt_generative.html"},"language":{"toc-title-document":"Table of contents","toc-title-website":"On this page","related-formats-title":"Other Formats","related-notebooks-title":"Notebooks","source-notebooks-prefix":"Source","other-links-title":"Other Links","code-links-title":"Code Links","launch-dev-container-title":"Launch Dev Container","launch-binder-title":"Launch Binder","article-notebook-label":"Article Notebook","notebook-preview-download":"Download Notebook","notebook-preview-download-src":"Download Source","notebook-preview-back":"Back to Article","manuscript-meca-bundle":"MECA Bundle","section-title-abstract":"Abstract","section-title-appendices":"Appendices","section-title-footnotes":"Footnotes","section-title-references":"References","section-title-reuse":"Reuse","section-title-copyright":"Copyright","section-title-citation":"Citation","appendix-attribution-cite-as":"For attribution, please cite this work as:","appendix-attribution-bibtex":"BibTeX 
citation:","title-block-author-single":"Author","title-block-author-plural":"Authors","title-block-affiliation-single":"Affiliation","title-block-affiliation-plural":"Affiliations","title-block-published":"Published","title-block-modified":"Modified","title-block-keywords":"Keywords","callout-tip-title":"Tip","callout-note-title":"Note","callout-warning-title":"Warning","callout-important-title":"Important","callout-caution-title":"Caution","code-summary":"Code","code-tools-menu-caption":"Code","code-tools-show-all-code":"Show All Code","code-tools-hide-all-code":"Hide All Code","code-tools-view-source":"View Source","code-tools-source-code":"Source Code","tools-share":"Share","tools-download":"Download","code-line":"Line","code-lines":"Lines","copy-button-tooltip":"Copy to Clipboard","copy-button-tooltip-success":"Copied!","repo-action-links-edit":"Edit this page","repo-action-links-source":"View source","repo-action-links-issue":"Report an issue","back-to-top":"Back to top","search-no-results-text":"No results","search-matching-documents-text":"matching documents","search-copy-link-title":"Copy link to search","search-hide-matches-text":"Hide additional matches","search-more-match-text":"more match in this document","search-more-matches-text":"more matches in this document","search-clear-button-title":"Clear","search-text-placeholder":"","search-detached-cancel-button-title":"Cancel","search-submit-button-title":"Submit","search-label":"Search","toggle-section":"Toggle section","toggle-sidebar":"Toggle sidebar navigation","toggle-dark-mode":"Toggle dark mode","toggle-reader-mode":"Toggle reader mode","toggle-navigation":"Toggle navigation","crossref-fig-title":"Figure","crossref-tbl-title":"Table","crossref-lst-title":"Listing","crossref-thm-title":"Theorem","crossref-lem-title":"Lemma","crossref-cor-title":"Corollary","crossref-prp-title":"Proposition","crossref-cnj-title":"Conjecture","crossref-def-title":"Definition","crossref-exm-title":"Example","crossref-exr-title":"Exercise","crossref-ch-prefix":"Chapter","crossref-apx-prefix":"Appendix","crossref-sec-prefix":"Section","crossref-eq-prefix":"Equation","crossref-lof-title":"List of Figures","crossref-lot-title":"List of Tables","crossref-lol-title":"List of Listings","environment-proof-title":"Proof","environment-remark-title":"Remark","environment-solution-title":"Solution","listing-page-order-by":"Order By","listing-page-order-by-default":"Default","listing-page-order-by-date-asc":"Oldest","listing-page-order-by-date-desc":"Newest","listing-page-order-by-number-desc":"High to Low","listing-page-order-by-number-asc":"Low to High","listing-page-field-date":"Date","listing-page-field-title":"Title","listing-page-field-description":"Description","listing-page-field-author":"Author","listing-page-field-filename":"File Name","listing-page-field-filemodified":"Modified","listing-page-field-subtitle":"Subtitle","listing-page-field-readingtime":"Reading Time","listing-page-field-wordcount":"Word Count","listing-page-field-categories":"Categories","listing-page-minutes-compact":"{0} min","listing-page-category-all":"All","listing-page-no-matches":"No matching items","listing-page-words":"{0} words"},"metadata":{"lang":"en","fig-responsive":true,"quarto-version":"1.4.549","bibliography":["references.bib"],"theme":"pulse","number-depth":3},"extensions":{"book":{"multiFile":true}}}},"projectFormats":["html"]}
\ No newline at end of file
diff --git a/content/1_introduction.qmd b/content/1_introduction.qmd
index dff87f6..6701c03 100644
--- a/content/1_introduction.qmd
+++ b/content/1_introduction.qmd
@@ -3,9 +3,17 @@
![](https://img.shields.io/badge/status-not_started-red)
-## Very quick intro to Julia and Turing
+## Brief Intro to Julia and Turing
-Goal is to teach just enough so that the reader understands the code.
+The goal is to teach just enough Julia for the reader to understand the code.
+We won't cover topics like plotting, as it depends heavily on the specific package used.
+
+### Installing Julia and Packages
+
+TODO.
+
+
+### Julia Basics
::: {.callout-important}
@@ -15,9 +23,11 @@ These are the most common sources of confusion and errors for newcomers to Julia
- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).
- **Positional; Keyword arguments**: Julia functions make a clear distinction between positional and keyword arguments, which are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.
-- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These **symbols** are like character strings that are not manipulable (there are more efficient).
+- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These *symbols* are like immutable character strings (they are more efficient).
- **Explicit vectorization**: Julia does not vectorize operations by default. You need to add a dot `.` to functions and operators (e.g., `sin.(x)`, `x .+ 1`) to apply them element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of the vector.
- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input "in-place" (without returning a copy), their name ends with a bang `!`. For example, assuming `x = [0]` (a 1-element vector containing 0), `push!(x, 2)` will modify `x` in place, turning it into `[0, 2]`.
+- **Macros**: Some functions start with `@`. These are called macros and are used to manipulate the code before it is run. For example, `@time` will measure the time it takes to run the code that follows.
+- **Unicode**: Julia is a modern language that supports Unicode characters, which are widely used for mathematical notation. You can get the *mu* `μ` character by typing `\mu` and pressing `TAB` (these conventions are illustrated in the short snippet below the callout).
:::
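+
+To make these conventions concrete, here is a short illustrative snippet (plain Julia plus the `Statistics` standard library; nothing book-specific):
+
+```{julia}
+#| eval: false
+#| code-fold: false
+
+using Statistics
+
+x = [1, 2, 3]   # 1-indexed: x[1] == 1
+push!(x, 4)     # the bang `!` signals that `x` is modified in place
+y = sin.(x)     # the dot `.` broadcasts sin() over each element
+μ = mean(x)     # unicode name, typed as \mu + TAB
+@show μ         # @show is a macro that prints the expression and its value
+col = :red      # a symbol: a lightweight alternative to the string "red"
+```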
@@ -25,6 +35,8 @@ These are the most common sources of confusion and errors for newcomers to Julia
```{julia}
#| output: false
+#| code-fold: false
+
using Turing, Distributions, Random
using Makie
@@ -42,6 +54,9 @@ fig
### Recover Distribution Parameters with Turing
```{julia}
+#| output: false
+#| code-fold: false
+
@model function model_gaussian(x)
# Priors
μ ~ Uniform(0, 200)
@@ -53,13 +68,26 @@ fig
end
end
-model = model_gaussian(iq)
-sampling_results = sample(model, NUTS(), 400)
+fit_gaussian = model_gaussian(iq)
+chain_gaussian = sample(fit_gaussian, NUTS(), 400)
+```
-# Summary (95% CI)
-summarystats(sampling_results)
+Inspecting the chain variable will show various posterior statistics (including the mean, standard deviation, and diagnostic indices).
+
+```{julia}
+#| code-fold: false
+
+chain_gaussian
```
+For the purpose of this book, we will mostly focus on the 95% Credible Interval (CI), and we will assume that a parameter is ***"significant"*** if its CI does not include 0.
+
+```{julia}
+#| code-fold: false
+
+# Summary (95% CI)
+hpd(chain_gaussian)
+```
## Linear Models
diff --git a/content/4a_rt_descriptive.qmd b/content/4a_rt_descriptive.qmd
index 2675786..d882ed0 100644
--- a/content/4a_rt_descriptive.qmd
+++ b/content/4a_rt_descriptive.qmd
@@ -1,6 +1,6 @@
# Descriptive Models
-![](https://img.shields.io/badge/status-up_to_date-green)
+![](https://img.shields.io/badge/status-up_to_date-brightgreen)
## The Data
@@ -563,3 +563,20 @@ loo_compare((
The `loo_compare()` function orders models from best to worst based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.
As one can see, traditional linear models perform terribly.
+
+## Other Models
+
+Other models are available for fitting RT data, which we will demonstrate below for reference purposes.
+However, we won't explain them here, as we will revisit them in the next chapter in the context of choice modeling.
+
+### Linear Ballistic Accumulator (LBA)
+
+TODO.
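+
+In the meantime, here is a minimal simulation sketch. It assumes the `LBA` distribution from [SequentialSamplingModels.jl](https://github.com/itsdfish/SequentialSamplingModels.jl) and its keyword names (`ν`, `A`, `k`, `τ`); check the package documentation for the current API.
+
+```{julia}
+#| eval: false
+
+using SequentialSamplingModels
+
+# Two accumulators: ν = drift rates, A = maximum starting point,
+# k = distance from A to the threshold, τ = non-decision time (assumed names)
+lba = LBA(ν=[3.0, 2.0], A=0.8, k=0.2, τ=0.3)
+
+# Simulate 1000 trials (assumed to return choices and reaction times)
+data = rand(lba, 1000)
+```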
+
+### Leaky Competing Accumulator (LCA)
+
+TODO.
+
+### Racing Diffusion Model (RDM)
+
+TODO.
\ No newline at end of file
diff --git a/content/4b_rt_generative.qmd b/content/4b_rt_generative.qmd
index 0b8cb8f..3a3259d 100644
--- a/content/4b_rt_generative.qmd
+++ b/content/4b_rt_generative.qmd
@@ -46,14 +46,47 @@ While such "generative" models offer potential insights into the cognitive proce
## Drift Diffusion Model (DDM)
-Use DDM as a case study to introduce generative models
+Interestingly, the **Wald** model is actually a special case of a more general class called the **Drift Diffusion Model (DDM)** (named as such because the evidence accumulation is assumed to be a "diffusion" process, i.e., a random walk).
+One of the main differences is that in the Wald model, the drift rate $\nu$ must be *positive*, as it tracks the time taken by the diffusion process to reach a single "positive" threshold $\alpha$.
-- [**Drift Diffusion Model (DDM) in R: A Tutorial**](https://dominiquemakowski.github.io/easyRT/articles/ddm.html)
+But what happens if we relax this constraint and allow the drift rate to be zero or negative? Many traces might then never reach the upper threshold, drifting instead toward increasingly negative values.
-## Other Models (LBA, LNR)
+Drift Diffusion Models are useful for **jointly modeling RTs and a binary outcome**, such as two different choices or accuracy (i.e., "correct" vs. "error").
+
+![](media/rt_ddm.gif)
+
+
+The parameters are:
+
+- **Nu** $\nu$ : The drift rate (also sometimes denoted *delta* $\delta$), representing the average slope of the accumulation process towards the boundaries. The larger the (absolute value of the) drift rate, the more effective the evidence accumulation for the corresponding response option. A drift rate close to 0 suggests an ambiguous stimulus. Typical range: [-5, 5].
+- **Alpha** $\alpha$ : The boundary separation threshold is the distance between the two decision bounds (lower bound being at 0 and upper bound at *alpha* $\alpha$). It has been interpreted as a measure of response caution (i.e., of speed-accuracy trade-off, with high *alpha* $\alpha$ being related to high accuracy). It represents the amount of evidence that is needed to make a response. Typical range: [0.5, 2].
+- **Beta** $\beta$ : The initial bias towards one of the responses. It sets the starting point of the accumulation process, expressed as a proportion of *alpha* $\alpha$: if $\alpha = 2.0$ and $\beta = 0.5$, the actual starting point is $2.0 \times 0.5 = 1$. Typical range: [0.3, 0.7].
+- **Tau** $\tau$ : The non-decision time. It represents all non-decisional processes, such as stimulus encoding and motor execution. Typical range (in seconds): [0.1, 0.5].
+
+
+This basic model is sometimes referred to as a Wiener model, to distinguish it from expanded versions of the DDM that include additional parameters (e.g., across-trial variability of the drift rate).
+
+
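+Below is a sketch of what simulating and fitting a DDM could look like. It assumes the `DDM` distribution from SequentialSamplingModels.jl, with the bias parameter named `z` (corresponding to *beta* $\beta$ above) and a joint density over (choice, RT) pairs; treat the exact constructor, argument names, and return values as assumptions and check the package documentation.
+
+```{julia}
+#| eval: false
+
+using Turing, SequentialSamplingModels
+
+# Simulate 500 trials from a DDM (parameter names assumed)
+data = rand(DDM(ν=1.0, α=1.5, z=0.5, τ=0.3), 500)
+
+@model function model_ddm(choice, rt)
+    # Weakly informative priors roughly matching the typical ranges above
+    ν ~ Normal(0, 2)
+    α ~ truncated(Normal(1.5, 1); lower=0)
+    z ~ Beta(2, 2)
+    τ ~ truncated(Normal(0.2, 0.1); lower=0)
+
+    for i in 1:length(rt)
+        # Assumes DDM implements logpdf over (choice, rt) pairs
+        Turing.@addlogprob! logpdf(DDM(ν=ν, α=α, z=z, τ=τ), (choice[i], rt[i]))
+    end
+end
+
+fit_ddm = model_ddm(data.choice, data.rt)
+chain_ddm = sample(fit_ddm, NUTS(), 400)
+```
+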
+## Linear Ballistic Accumulator (LBA)
+
+TODO.
+
+## Other Models (LNR, RDM)
+
+TODO.
## Including Random Effects
+### Random Intercept
+
+TODO.
+
+### Random Slopes
+
+TODO.
+
+### Performance Tips
+
TODO.
## Additional Resources
diff --git a/content/5_individual.qmd b/content/5_individual.qmd
index 1d10c47..bbbcce0 100644
--- a/content/5_individual.qmd
+++ b/content/5_individual.qmd
@@ -1,6 +1,7 @@
-# Individual Parameters
+# Individual Differences
![](https://img.shields.io/badge/status-not_started-red)
-1. From mixed models
+1. Task reliability (Signal-to-Noise Ratio)
+2. Extracting individual parameters from mixed models
3. As prior-informed individual Bayesian models
\ No newline at end of file
diff --git a/content/_freeze/1_introduction/execute-results/html.json b/content/_freeze/1_introduction/execute-results/html.json
index 82e5a35..828cd85 100644
--- a/content/_freeze/1_introduction/execute-results/html.json
+++ b/content/_freeze/1_introduction/execute-results/html.json
@@ -1,8 +1,8 @@
{
- "hash": "f1b09c48ebcc0d8b9c817ca76b3d37bf",
+ "hash": "13a6386ee9710a33c192a1e02a587307",
"result": {
"engine": "jupyter",
- "markdown": "# Fundamentals of Bayesian Modeling in Julia\n\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Very quick intro to Julia and Turing\n\nGoal is to teach just enough so that the reader understands the code.\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions makes a clear distinction between positional and keyword arguments, and both are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These **symbols** are like character strings that are not manipulable (there are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to have it apply element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of its vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a band `!` is used. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (it is equivalent to `x = push(x, 2)`).\n:::\n\n\n### Generate Data from Normal Distribution\n\n::: {#0c15ea13 .cell execution_count=1}\n``` {.julia .cell-code}\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n:::\n\n\n::: {#6de958d5 .cell execution_count=2}\n``` {.julia .cell-code}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=3}\n![](1_introduction_files/figure-html/cell-3-output-2.svg){}\n:::\n:::\n\n\n### Recover Distribution Parameters with Turing\n\n::: {#76fbbced .cell execution_count=3}\n``` {.julia .cell-code}\n@model function model_gaussian(x)\n # Priors\n μ ~ Uniform(0, 200)\n σ ~ Uniform(0, 30)\n\n # Check against each datapoint\n for i in 1:length(x)\n x[i] ~ Normal(μ, σ)\n end\nend\n\nmodel = model_gaussian(iq)\nsampling_results = sample(model, NUTS(), 400)\n\n# Summary (95% CI)\nsummarystats(sampling_results)\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Info: Found initial step size\n└ ϵ = 0.05\n\rSampling: 0%|█ | ETA: 0:00:32\rSampling: 100%|█████████████████████████████████████████| Time: 0:00:01\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n\n::: {.ansi-escaped-output}\n```{=html}\nSummary Statistics\n parameters mean std mcse ess_bulk ess_tail rhat ⋯\n Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯\n μ 101.0966 0.6163 0.0285 464.5397 331.0063 1.0010 ⋯\n σ 14.5905 0.4758 0.0221 504.1965 231.1654 1.0362 ⋯\n 1 column omitted \n \n```\n:::\n\n:::\n:::\n\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Boostrapping\n\nIntroduce concepts related to pseudo-posterior distribution description\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, how to leverage them to model interindividual differences\n\n## Bayesian estimation\n\nintroduce Bayesian estimation and priors over parameters\n\n## Bayesian mixed linear regression\n\nput everything together\n\n",
+ "markdown": "# Fundamentals of Bayesian Modeling in Julia\n\n![](https://img.shields.io/badge/status-not_started-red)\n\n\n## Brief Intro to Julia and Turing\n\nGoal is to teach just enough so that the reader understands the code. \nWe won't be discussing things like plotting (as it highly depends on the package used).\n\n### Installing Julia and Packages\n\nTODO.\n\n\n### Julia Basics\n\n::: {.callout-important}\n\n### Notable Differences with Python and R\n\nThese are the most common sources of confusion and errors for newcomers to Julia:\n\n- **1-indexing**: Similarly to R, Julia uses 1-based indexing, which means that the first element of a vector is `x[1]` (not `x[0]` as in Python).\n- **Positional; Keyword arguments**: Julia functions makes a clear distinction between positional and keyword arguments, and both are often separated by `;`. Positional arguments are typically passed without a name, while keyword arguments must be named (e.g., `scatter(0, 0; color=:red)`). Some functions might look like `somefunction(; arg1=val1, arg2=val2)`.\n- **Symbols**: Some arguments are prefixed with `:` (e.g., `:red` in `scatter(0, 0; color=:red)`). These *symbols* are like character strings that are not manipulable (there are more efficient).\n- **Explicit vectorization**: Julia does not vectorize operations by default. You need to use a dot `.` in front of functions and operators to have it apply element by element. For example, `sin.([0, 1, 2])` will apply the `sin()` function to each element of its vector.\n- **In-place operations**: Julia has a strong emphasis on performance, and in-place operations are often used to avoid unnecessary memory allocations. When functions modify their input \"in-place\" (without returns), a band `!` is used. For example, assuming `x = [0]` (1-element vector containing 0), `push!(x, 2)` will modify `x` in place (it is equivalent to `x = push(x, 2)`).\n- **Macros**: Some functions start with `@`. These are called macros and are used to manipulate the code before it is run. For example, `@time` will measure the time it takes to run the code that follows.\n- **Unicode**: Julia is a modern language to supports unicode characters, which are used a lot for mathematical operations. You can get the *mu* `μ` character by typing `\\mu` and pressing `TAB`.\n:::\n\n\n### Generate Data from Normal Distribution\n\n::: {#1f15e3d4 .cell execution_count=1}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n:::\n\n\n::: {#e3dbae1f .cell execution_count=2}\n``` {.julia .cell-code}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
\n\n\n### Generate Data from Normal Distribution\n\n::: {#1f15e3d4 .cell execution_count=1}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Turing, Distributions, Random\nusing Makie\n\n# Random sample from a Normal(μ=100, σ=15)\niq = rand(Normal(100, 15), 500)\n```\n:::\n\n\n::: {#e3dbae1f .cell execution_count=2}\n``` {.julia .cell-code}\nfig = Figure()\nax = Axis(fig[1, 1], title=\"Distribution\")\ndensity!(ax, iq)\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=3}\n![](1_introduction_files/figure-html/cell-3-output-2.svg){}\n:::\n:::\n\n\n### Recover Distribution Parameters with Turing\n\n::: {#e37369d6 .cell execution_count=3}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_gaussian(x)\n    # Priors\n    μ ~ Uniform(0, 200)\n    σ ~ Uniform(0, 30)\n\n    # Check against each datapoint\n    for i in 1:length(x)\n        x[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_gaussian = model_gaussian(iq)\nchain_gaussian = sample(fit_gaussian, NUTS(), 400)\n```\n:::\n\n\nInspecting the chain variable will show various posterior statistics (including the mean, standard deviation, and diagnostic indices).\n\n::: {#31ba55af .cell execution_count=4}\n``` {.julia .cell-code code-fold=\"false\"}\nchain_gaussian\n```\n\n::: {.cell-output .cell-output-display execution_count=5}\n\n::: {.ansi-escaped-output}\n```{=html}\nChains MCMC chain (400×14×1 Array{Float64, 3}):\nIterations = 201:1:600\nNumber of chains = 1\nSamples per chain = 400\nWall duration = 8.8 seconds\nCompute duration = 8.8 seconds\nparameters = μ, σ\ninternals = lp, n_steps, is_accept, acceptance_rate, log_density, hamiltonian_energy, hamiltonian_energy_error, max_hamiltonian_energy_error, tree_depth, numerical_error, step_size, nom_step_size\nSummary Statistics\n parameters mean std mcse ess_bulk ess_tail rhat e ⋯\n Symbol Float64 Float64 Float64 Float64 Float64 Float64 ⋯\n μ 99.2403 0.6727 0.0333 414.3604 324.9996 0.9993 ⋯\n σ 14.4973 0.4440 0.0187 561.5709 284.5407 0.9976 ⋯\n 1 column omitted \nQuantiles\n parameters 2.5% 25.0% 50.0% 75.0% 97.5% \n Symbol Float64 Float64 Float64 Float64 Float64 \n μ 97.9096 98.7663 99.2552 99.7769 100.4228\n σ 13.6853 14.1811 14.5066 14.7917 15.3761\n \n```\n:::\n\n:::\n:::\n\n\nFor the purpose of this book, we will mostly focus on the 95% Credible Interval (CI), and we will assume that a parameter is ***\"significant\"*** if its CI does not include 0.\n\n::: {#45fb1631 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_gaussian)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ 97.8594 100.3178\n σ 13.5687 15.2885\n \n```\n:::\n\n:::\n:::\n\n\n## Linear Models\n\nUnderstand what the parameters mean (intercept, slopes, sigma).\n\n## Bootstrapping\n\nIntroduce concepts related to pseudo-posterior distribution description.\n\n## Hierarchical Models\n\nSimpson's paradox, random effects, and how to leverage them to model interindividual differences.\n\n## Bayesian estimation\n\nIntroduce Bayesian estimation and priors over parameters.\n\n## Bayesian mixed linear regression\n\nPut everything together.\n\n",
"supporting": [
"1_introduction_files\\figure-html"
],
diff --git a/content/_freeze/1_introduction/figure-html/cell-3-output-2.svg b/content/_freeze/1_introduction/figure-html/cell-3-output-2.svg
index f7b40c1..f600b86 100644
--- a/content/_freeze/1_introduction/figure-html/cell-3-output-2.svg
+++ b/content/_freeze/1_introduction/figure-html/cell-3-output-2.svg
diff --git a/content/_freeze/4a_rt_descriptive/execute-results/html.json b/content/_freeze/4a_rt_descriptive/execute-results/html.json
index 09524bf..aed7138 100644
--- a/content/_freeze/4a_rt_descriptive/execute-results/html.json
+++ b/content/_freeze/4a_rt_descriptive/execute-results/html.json
@@ -1,8 +1,8 @@
{
- "hash": "485397b899d19b6923f1cc82888d5854",
+ "hash": "f096b77d0a67e9d586fedbbaa7f759f7",
"result": {
"engine": "jupyter",
- "markdown": "# Descriptive Models\n\n![](https://img.shields.io/badge/status-up_to_date-green)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n::: {#def3e6bc .cell execution_count=2}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n1 1 Speed 0.7 false Low 2 1 Speed 0.392 true Very Low 3 1 Speed 0.46 false Very Low 4 1 Speed 0.455 false Very Low 5 1 Speed 0.505 true Low 6 1 Speed 0.773 false High 7 1 Speed 0.39 false High 8 1 Speed 0.587 true Low 9 1 Speed 0.603 false Low 10 1 Speed 0.435 false High
\n```\n:::\n:::\n\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are going to first take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n::: {#3b99ba16 .cell execution_count=3}\n``` {.julia .cell-code}\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n:::\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n::: {#60c565e5 .cell execution_count=4}\n``` {.julia .cell-code}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n fig = Figure()\n ax = Axis(fig[1, 1], title=title,\n xlabel=\"RT (s)\",\n ylabel=\"Distribution\",\n yticksvisible=false,\n xticksvisible=false,\n yticklabelsvisible=false)\n Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n Makie.axislegend(\"Condition\"; position=:rt)\n Makie.ylims!(ax, (0, nothing))\n return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2018)\")\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n \n```\n:::\n:::\n\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which require the inclusion of random effects in the model).\nWe will treat the data as if it was a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$ and a variance $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n- The variance $\\sigma$ (correspondong to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition which is in our case `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n::: {#fdfd5559 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Gaussian(rt; condition=nothing)\n\n # Set priors on variance, intercept and effect of condition\n σ ~ truncated(Normal(0, 0.5); lower=0)\n\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#1e1a3766 .cell execution_count=6}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n σ 0.1652 0.1701\n μ_intercept 0.5071 0.5168\n μ_condition 0.1319 0.1457\n \n```\n:::\n\n:::\n:::\n\n\nThe effect of Condition is significant, people are on average slower (higher RT) when condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n::: {#ba2b1593 .cell execution_count=7}\n``` {.julia .cell-code}\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n:::\n\n\n::: {#f02ce7d4 .cell fig-height='7' fig-width='10' execution_count=8}\n``` {.julia .cell-code}\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=8}\n```{=html}\n \n```\n:::\n:::\n\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response time is more variable). 
\nTypical linear model estimate only one value for sigma $\\sigma$ for the whole model, hence the requirement for **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances accross different values of predictors. \nIt is important in linear models as only one value for sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the variance in the reference condition) and a prior on how much this variance changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a biggger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possible negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at for instance **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nAnd such effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will often happen as the sampling process does explore unlikely regions of the parameter space).\n\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *Truncating* the prior to a lower bound of 0. 
\nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept) - and if it wasn't the case, we could have changed the reference factor.\nHowever, this is not a good practice as we are enforcing a very strong a priori specific direction of the effect, which is not always justified.\n\n::: {#62ef3b30 .cell execution_count=9}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0) # Same prior as previously\n σ_condition ~ truncated(Normal(0, 0.1); lower=0) # Enforce positivity\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#4b488c39 .cell execution_count=10}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=10}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5081 0.5148\n μ_condition 0.1330 0.1446\n σ_intercept 0.1219 0.1271\n σ_condition 0.0714 0.0810\n \n```\n:::\n\n:::\n:::\n\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition. \n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement when sigma $\\sigma$ is negative to avoid trying this value and erroring, and instead returning an infinitely low model probability (`-Inf`) to push away the exploration of this impossible region.\n\n::: {#7b17f69f .cell execution_count=11}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n # Priors\n μ_intercept ~ truncated(Normal(0, 1); lower=0)\n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! 
-Inf\n return nothing\n end\n rt[i] ~ Normal(μ, σ)\n end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#fa8a4426 .cell execution_count=12}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=12}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5076 0.5148\n μ_condition 0.1316 0.1444\n σ_intercept 0.1223 0.1273\n σ_condition 0.0709 0.0803\n \n```\n:::\n\n:::\n:::\n\n\n::: {#ebf2b747 .cell execution_count=13}\n``` {.julia .cell-code}\npred = predict(model_ScaledlGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=13}\n```{=html}\n \n```\n:::\n:::\n\n\n\n\nAlthough relaxing the homoscedasticity assumption is a good step forward, allowing us to make **richer conclusions** and better capturing the data.\nDespite that, the Gaussian model stil seem to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction time (RTs) have been traditionally modeled using traditional linear models and their derived statistical tests such as *t*-test and ANOVAs. Importantly, linear models - by definition - will try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times (RTs), this is not ideal, as RTs typically exhibit a non-normal distribution, skewed towards the left with a long tail towards the right. This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*. 
\nHowever, this practice should be avoided as it leads to various issues, including loss of power and distorted results interpretation [@lo2015transform; @schramm2019reaction].\nInstead, rather than applying arbitrary data transformation, it would be better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n\n## Shifted LogNormal Model\n\nOne of the obvious candidate alternative to the log-transformation would be to use a model with a Log-transformed Normal distribution.\nA LogNormal distribution is a distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ and is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$). \n\nNote that for LogNormal distributions (as it is the case for many of the models introduced in the rest of the capter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for response must be lower than the faster response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychology research has shown that such minimum response time for Humans is often betwen 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times. \n\n::: {#07b852a9 .cell execution_count=14}\n``` {.julia .cell-code}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. 
The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=14}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Specification\n\n::: {#dbebf70c .cell execution_count=15}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n μ_intercept ~ Normal(0, exp(1)) # On the log-scale: exp(μ) to get value in seconds\n μ_condition ~ Normal(0, exp(0.3))\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#76460a1b .cell execution_count=16}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=16}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n τ 0.1718 0.1792\n μ_intercept -1.1590 -1.1327\n μ_condition 0.3157 0.3430\n σ_intercept 0.3082 0.3228\n σ_condition 0.0327 0.0508\n \n```\n:::\n\n:::\n:::\n\n\n::: {#6a414e1f .cell execution_count=17}\n``` {.julia .cell-code}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=17}\n```{=html}\n \n```\n:::\n:::\n\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquituous in nature (and hence used as a good default) is due to the **Central Limit Theorem**, which states that the sum of a large number of independent random variables will be approximately normally distributed. 
Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of random variables result in a **LogNormal** distribution, and multiplicating (rather than additive) cascades of processes are also very common in nature, from lengths of latent periods of infectious diseases to distribution of mineral resources in the Earth's crust, and the elemental mechanisms at stakes in physics and cell biolody [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified with the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::\n\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of normal and exponential distributions and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian distribution - and *tau* $\\tau$ - the exponential component of the distribution (note that although denoted by the same letter, it does not correspond directly to a shift of the distribution). \nIntuitively, these parameters reflect the centrality, the width and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch]. \nHowever, @matzke2009psychological demonstrate that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underline their value primarily as **descriptive tools**, rather than models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD. \nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift is centre of mass. 
\nHence, one should be careful note to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)\n\n### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameters, including the exponential component *tau* $\\tau$.\nAll wee need is to set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$. \n\n::: {#a7089bbe .cell execution_count=18}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ExGaussian(rt; condition=nothing)\n\n # Priors \n μ_intercept ~ Normal(0, 1) \n μ_condition ~ Normal(0, 0.3)\n\n σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n σ_condition ~ Normal(0, 0.1)\n\n τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n τ_condition ~ Normal(0, 0.1)\n\n for i in 1:length(rt)\n μ = μ_intercept + μ_condition * condition[i]\n σ = σ_intercept + σ_condition * condition[i]\n if σ < 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ <= 0 # Avoid negative tau values\n Turing.@addlogprob! -Inf\n return nothing\n end\n rt[i] ~ ExGaussian(μ, σ, τ)\n end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#bf20b174 .cell execution_count=19}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=19}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.3999 0.4062\n μ_condition 0.0618 0.0721\n σ_intercept 0.0381 0.0432\n σ_condition 0.0104 0.0185\n τ_intercept 0.1052 0.1130\n τ_condition 0.0641 0.0795\n \n```\n:::\n\n:::\n:::\n\n\n::: {#d4d95c07 .cell execution_count=20}\n``` {.julia .cell-code}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_ExGaussian)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=20}\n```{=html}\n \n```\n:::\n:::\n\n\nThe ExGaussian model also provides an excellent fit to the data. 
\nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convoluted with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variant, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), is different from the previous distributions in that it is not characterized by a \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is more complex but, as we will see below, offers a window to a new level of interpretation.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::\n\n### Model Specification\n\n::: {#4df349b0 .cell execution_count=21}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n # Priors \n ν_intercept ~ truncated(Normal(1, 3); lower=0)\n ν_condition ~ Normal(0, 1)\n\n α_intercept ~ truncated(Normal(0, 1); lower=0)\n α_condition ~ Normal(0, 0.5)\n\n τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n τ_condition ~ Normal(0, 0.01)\n\n for i in 1:length(rt)\n ν = ν_intercept + ν_condition * condition[i]\n if ν <= 0 # Avoid negative drift\n Turing.@addlogprob! -Inf\n return nothing\n end\n α = α_intercept + α_condition * condition[i]\n if α <= 0 # Avoid negative variance values\n Turing.@addlogprob! -Inf\n return nothing\n end\n τ = τ_intercept + τ_condition * condition[i]\n if τ < 0 # Avoid negative tau values\n Turing.@addlogprob! 
-Inf\n return nothing\n end\n rt[i] ~ Wald(ν, α, τ)\n end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n:::\n\n\n::: {#b9814b26 .cell execution_count=22}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_Wald; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=22}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n ν_intercept 5.0986 5.3197\n ν_condition -1.3387 -1.0493\n α_intercept 1.6605 1.7456\n α_condition 0.2060 0.3437\n τ_intercept 0.1808 0.1870\n τ_condition -0.0371 -0.0231\n \n```\n:::\n\n:::\n:::\n\n\n::: {#cf9d1165 .cell execution_count=23}\n``` {.julia .cell-code}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=23}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Comparison\n\nAt this stage, given the multiple options avaiable to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, which is a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n::: {#398a7d32 .cell execution_count=24}\n``` {.julia .cell-code}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledlGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n Gaussian = loo_Gaussian, \n ScaledGaussian = loo_ScaledGaussian, \n LogNormal = loo_LogNormal, \n ExGaussian = loo_ExGaussian, \n Wald = loo_Wald))\n```\n\n::: {.cell-output .cell-output-display execution_count=24}\n```\n\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\n┌────────────────┬──────────┬────────┬────────┐\n│ │ cv_elpd │ cv_avg │ weight │\n├────────────────┼──────────┼────────┼────────┤\n│ ExGaussian │ 0.00 │ 0.00 │ 1.00 │\n│ LogNormal │ -322.27 │ -0.03 │ 0.00 │\n│ Wald │ -379.85 │ -0.04 │ 0.00 │\n│ ScaledGaussian │ -2465.97 │ -0.26 │ 0.00 │\n│ Gaussian │ -2974.49 │ -0.31 │ 0.00 │\n└────────────────┴──────────┴────────┴────────┘\n```\n:::\n:::\n\n\nThe `loo_compare()` function orders models from best to worse based on their ELPD (Expected Log Pointwise Predictive Density) and provides the difference in ELPD between the best model and the other models.\nAs one can see, traditional linear models perform terribly.\n\n",
+ "markdown": "# Descriptive Models\n\n![](https://img.shields.io/badge/status-up_to_date-brightgreen)\n\n## The Data\n\nFor this chapter, we will be using the data from @wagenmakers2008diffusion - Experiment 1 [also reanalyzed by @heathcote2012linear], that contains responses and response times for several participants in two conditions (where instructions emphasized either **speed** or **accuracy**).\nUsing the same procedure as the authors, we excluded all trials with uninterpretable response time, i.e., responses that are too fast (<180 ms) or too slow [>2 sec instead of >3 sec, see @theriault2024check for a discussion on outlier removal].\n\n::: {#def3e6bc .cell execution_count=2}\n``` {.julia .cell-code code-fold=\"false\"}\nusing Downloads, CSV, DataFrames, Random\nusing Turing, Distributions, SequentialSamplingModels\nusing GLMakie\n\nRandom.seed!(123) # For reproducibility\n\ndf = CSV.read(Downloads.download(\"https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv\"), DataFrame)\n\n# Show 10 first rows\nfirst(df, 10)\n```\n\n::: {.cell-output .cell-output-display execution_count=2}\n```{=html}\n1 1 Speed 0.7 false Low 2 1 Speed 0.392 true Very Low 3 1 Speed 0.46 false Very Low 4 1 Speed 0.455 false Very Low 5 1 Speed 0.505 true Low 6 1 Speed 0.773 false High 7 1 Speed 0.39 false High 8 1 Speed 0.587 true Low 9 1 Speed 0.603 false Low 10 1 Speed 0.435 false High
\n```\n:::\n:::\n\n\nIn the previous chapter, we modelled the error rate (the probability of making an error) using a logistic model, and observed that it was higher in the `\"Speed\"` condition. \nBut how about speed? We are first going to take interest in the RT of **Correct** answers only (as we can assume that errors are underpinned by a different *generative process*). \n\nAfter filtering out the errors, we create a new column, `Accuracy`, which is the \"binarization\" of the `Condition` column, and is equal to 1 when the condition is `\"Accuracy\"` and 0 when it is `\"Speed\"`.\n\n::: {#3b99ba16 .cell execution_count=3}\n``` {.julia .cell-code}\ndf = df[df.Error .== 0, :]\ndf.Accuracy = df.Condition .== \"Accuracy\"\n```\n:::\n\n\n::: {.callout-tip title=\"Code Tip\"}\nNote the usage of *vectorization* `.==` as we want to compare each element of the `Condition` vector to the target `\"Accuracy\"`.\n:::\n\n::: {#60c565e5 .cell execution_count=4}\n``` {.julia .cell-code}\nfunction plot_distribution(df, title=\"Empirical Distribution of Data from Wagenmakers et al. (2008)\")\n    fig = Figure()\n    ax = Axis(fig[1, 1], title=title,\n        xlabel=\"RT (s)\",\n        ylabel=\"Distribution\",\n        yticksvisible=false,\n        xticksvisible=false,\n        yticklabelsvisible=false)\n    Makie.density!(df[df.Condition .== \"Speed\", :RT], color=(\"#EF5350\", 0.7), label = \"Speed\")\n    Makie.density!(df[df.Condition .== \"Accuracy\", :RT], color=(\"#66BB6A\", 0.7), label = \"Accuracy\")\n    Makie.axislegend(\"Condition\"; position=:rt)\n    Makie.ylims!(ax, (0, nothing))\n    return fig\nend\n\nplot_distribution(df, \"Empirical Distribution of Data from Wagenmakers et al. (2008)\")\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=4}\n```{=html}\n \n```\n:::\n:::\n\n\n## Gaussian (aka *Linear*) Model\n\n::: {.callout-note}\nNote that until the last section of this chapter, we will disregard the existence of multiple participants (which requires the inclusion of random effects in the model).\nWe will treat the data as if it came from a single participant at first to better understand the parameters, but will show how to add random effects at the end.\n:::\n\nA linear model is the most common type of model. 
\nIt aims at predicting the **mean** $\\mu$ of the outcome variable using a **Normal** (aka *Gaussian*) distribution for the residuals.\nIn other words, it models the outcome $y$ as a Normal distribution with a mean $\\mu$ that is itself the result of a linear function of the predictors $X$ and a variance $\\sigma$ that is constant across all values of the predictors.\nIt can be written as $y = Normal(\\mu, \\sigma)$, where $\\mu = intercept + slope * X$.\n\nIn order to fit a Linear Model for RTs, we need to set a prior on all these parameters, namely:\n- The variance $\\sigma$ (corresponding to the \"spread\" of RTs)\n- The mean $\\mu$ for the intercept (i.e., at the reference condition, which in our case is `\"Speed\"`)\n- The effect of the condition (the slope).\n\n### Model Specification\n\n::: {#fdfd5559 .cell execution_count=5}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Gaussian(rt; condition=nothing)\n\n    # Set priors on variance, intercept and effect of condition\n    σ ~ truncated(Normal(0, 0.5); lower=0)\n\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\n\nfit_Gaussian = model_Gaussian(df.RT; condition=df.Accuracy)\nchain_Gaussian = sample(fit_Gaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#1e1a3766 .cell execution_count=6}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_Gaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=6}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n σ 0.1652 0.1701\n μ_intercept 0.5071 0.5168\n μ_condition 0.1319 0.1457\n \n```\n:::\n\n:::\n:::\n\n\nThe effect of Condition is significant: people are on average slower (higher RT) when the condition is `\"Accuracy\"`.\nBut is our model good?\n\n### Posterior Predictive Check\n\n::: {#ba2b1593 .cell execution_count=7}\n``` {.julia .cell-code}\npred = predict(model_Gaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_Gaussian)\npred = Array(pred)\n```\n:::\n\n\n::: {#f02ce7d4 .cell fig-height='7' fig-width='10' execution_count=8}\n``` {.julia .cell-code}\nfig = plot_distribution(df, \"Predictions made by Gaussian (aka Linear) Model\")\nfor i in 1:length(chain_Gaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=8}\n```{=html}\n \n```\n:::\n:::\n\n\n## Scaled Gaussian Model\n\nThe previous model, despite its poor fit to the data, suggests that the mean RT is higher for the `Accuracy` condition. But it seems like the distribution is also *wider* (response time is more variable). 
\nA typical linear model estimates only one value for sigma $\\sigma$ for the whole model, hence the requirement of **homoscedasticity**.\n\n::: {.callout-note}\n**Homoscedasticity**, or homogeneity of variances, is the assumption of similar variances across different values of predictors. \nIt is important in linear models as only one value for sigma $\\sigma$ is estimated.\n:::\n\nIs it possible to set sigma $\\sigma$ as a parameter that would depend on the condition, in the same way as mu $\\mu$? In Julia, this is very simple.\n\nAll we need is to set sigma $\\sigma$ as the result of a linear function, such as $\\sigma = intercept + slope * condition$.\nThis means setting a prior on the intercept of sigma $\\sigma$ (in our case, the variance in the reference condition) and a prior on how much this variance changes for the other condition.\nThis change can, by definition, be positive or negative (i.e., the other condition can have either a bigger or a smaller variance), so the prior over the effect of condition should ideally allow for positive and negative values (e.g., `σ_condition ~ Normal(0, 0.1)`).\n\nBut this leads to an **important problem**.\n\n::: {.callout-important}\nThe combination of an intercept and a (possibly negative) slope for sigma $\\sigma$ technically allows for negative variance values, which is impossible (distributions cannot have a negative variance).\nThis issue is one of the most important to address when setting up complex models for RTs.\n:::\n\nIndeed, even if we set a very narrow prior on the intercept of sigma $\\sigma$ to fix it at, for instance, **0.14**, and a narrow prior on the effect of condition, say $Normal(0, 0.001)$, an effect of condition of **-0.15** is still possible (albeit with very low probability). \nSuch an effect would lead to a sigma $\\sigma$ of **0.14 - 0.15 = -0.01**, which would lead to an error (and this will happen often, as the sampling process does explore unlikely regions of the parameter space).
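\n\nWe can make both points concrete with a couple of lines (the numbers are the illustrative values from the paragraph above, not fitted estimates):\n\n```julia\nusing Distributions\n\n# An effect of -0.15 under a Normal(0, 0.001) prior: vanishingly unlikely, yet not impossible\nlogpdf(Normal(0, 0.001), -0.15)  # ≈ -11244.0 (finite, so the sampler may still visit it)\n\n# A negative total sigma is rejected by the distribution itself\ntry\n    Normal(0.5, 0.14 - 0.15)  # σ = -0.01\ncatch err\n    print(err)  # DomainError: σ must be non-negative\nend\n```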
\n\n\n### Solution 1: Directional Effect of Condition\n\nOne possible (but not recommended) solution is to simply make it impossible for the effect of condition to be negative by *truncating* the prior to a lower bound of 0. \nThis can work in our case, because we know that the comparison condition is likely to have a higher variance than the reference condition (the intercept), and if that wasn't the case, we could have changed the reference factor.\nHowever, this is not a good practice, as we are enforcing a very strong a priori assumption about the direction of the effect, which is not always justified.\n\n::: {#62ef3b30 .cell execution_count=9}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0) # Same prior as previously\n    σ_condition ~ truncated(Normal(0, 0.1); lower=0) # Enforce positivity\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#4b488c39 .cell execution_count=10}\n``` {.julia .cell-code code-fold=\"false\"}\n# Summary (95% CI)\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=10}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5081 0.5148\n μ_condition 0.1330 0.1446\n σ_intercept 0.1219 0.1271\n σ_condition 0.0714 0.0810\n \n```\n:::\n\n:::\n:::\n\n\nWe can see that the effect of condition on sigma $\\sigma$ is significantly positive: the variance is higher in the `Accuracy` condition as compared to the `Speed` condition. \n\n### Solution 2: Avoid Exploring Negative Variance Values\n\nThe other trick is to force the sampling algorithm to avoid exploring negative variance values (when sigma $\\sigma$ < 0).\nThis can be done by adding a conditional statement when sigma $\\sigma$ is negative to avoid trying this value and erroring, and instead returning an infinitely low model probability (`-Inf`) to push the exploration away from this impossible region.
\n\n::: {#7b17f69f .cell execution_count=11}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ScaledlGaussian(rt; condition=nothing)\n\n    # Priors\n    μ_intercept ~ truncated(Normal(0, 1); lower=0)\n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0 # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ Normal(μ, σ)\n    end\nend\n\nfit_ScaledlGaussian = model_ScaledlGaussian(df.RT; condition=df.Accuracy)\nchain_ScaledGaussian = sample(fit_ScaledlGaussian, NUTS(), 400)\n```\n:::\n\n\n::: {#fa8a4426 .cell execution_count=12}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ScaledGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=12}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n μ_intercept 0.5076 0.5148\n μ_condition 0.1316 0.1444\n σ_intercept 0.1223 0.1273\n σ_condition 0.0709 0.0803\n \n```\n:::\n\n:::\n:::\n\n\n::: {#ebf2b747 .cell execution_count=13}\n``` {.julia .cell-code}\npred = predict(model_ScaledlGaussian([(missing) for i in 1:length(df.RT)], condition=df.Accuracy), chain_ScaledGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Scaled Gaussian Model\")\nfor i in 1:length(chain_ScaledGaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=13}\n```{=html}\n \n```\n:::\n:::\n\n\n\n\nRelaxing the homoscedasticity assumption is a good step forward, allowing us to draw **richer conclusions** and to better capture the data.\nDespite that, the Gaussian model still seems to be a poor fit to the data.\n\n## The Problem with Linear Models\n\nReaction times (RTs) have traditionally been modeled using linear models and their derived statistical tests, such as *t*-tests and ANOVAs. Importantly, linear models - by definition - will try to predict the *mean* of the outcome variable by estimating the \"best fitting\" *Normal* distribution. In the context of reaction times, this is not ideal, as RTs typically exhibit a non-normal, right-skewed distribution, with most of the mass on the left and a long tail towards the right. This means that the parameters of a Normal distribution (mean $\\mu$ and standard deviation $\\sigma$) are not good descriptors of the data.\n\n![](media/rt_normal.gif)\n\n> Linear models try to find the best fitting Normal distribution for the data. However, for reaction times, even the best fitting Normal distribution (in red) does not capture well the actual data (in grey).
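\n\nThis asymmetry can be checked directly on the data. As a rough sketch (assuming `skewness`, which the statistics packages loaded above provide via StatsBase):\n\n```julia\n# Positive skewness = long right tail, typical of RTs\nskewness(df.RT)\n\n# For reference, the best-fitting Normal distribution is symmetric by construction\nfit_mle(Normal, df.RT)\n```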
\n\nA popular mitigation method to account for the non-normality of RTs is to transform the data, using for instance the popular *log-transform*. \nHowever, this practice should be avoided as it leads to various issues, including loss of power and distorted interpretation of results [@lo2015transform; @schramm2019reaction].\nInstead of applying arbitrary data transformations, it is better to swap the Normal distribution used by the model for a more appropriate one that can better capture the characteristics of a RT distribution.\n\n\n## Shifted LogNormal Model\n\nAn obvious candidate alternative to the log-transformation would be to use a model with a log-transformed Normal distribution.\nA LogNormal distribution is the distribution of a random variable whose logarithm is normally distributed. In this model, the *mean* $\\mu$ is defined on the log-scale, and effects must be interpreted as multiplicative rather than additive (the condition increases the mean RT by a factor of $\\exp(\\mu_{condition})$). 
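\n\nFor example, taking a value inside the credible interval estimated in the Interpretation section below, the multiplicative reading goes as follows (a back-of-the-envelope illustration):\n\n```julia\n# μ_condition ≈ 0.33 on the log-scale (see the HPD interval below)\nexp(0.33)  # ≈ 1.39, i.e., mean RTs about 39% longer in the \"Accuracy\" condition\n```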
\n\nNote that for LogNormal distributions (as is the case for many of the models introduced in the rest of the chapter), the distribution parameters ($\\mu$ and $\\sigma$) are not independent with respect to the mean and the standard deviation (SD).\nThe empirical SD increases when the *mean* $\\mu$ increases (which is seen as a feature rather than a bug, as it is consistent with typical reaction time data [@wagenmakers2005relation]).\n\nA **Shifted** LogNormal model introduces a shift (a delay) parameter *tau* $\\tau$ that corresponds to the minimum \"starting time\" of the response process.\n\nWe need to set a prior for this parameter, which is usually truncated between 0 (to exclude negative minimum times) and the minimum RT of the data (the logic being that the minimum delay for response must be lower than the fastest response actually observed).\n\nWhile $Uniform(0, min(RT))$ is a common choice of prior, it is not ideal as it implies that all values between 0 and the minimum RT are equally likely, which is not the case.\nIndeed, psychological research has shown that such minimum response times for humans often lie between 100 and 250 ms. \nMoreover, in our case, we explicitly removed all RTs below 180 ms, suggesting that the minimum response time is more likely to approach 180 ms than 0 ms.\n\n### Prior on Minimum RT\n\nInstead of a $Uniform$ prior, we will use a $Gamma(1.1, 11)$ distribution (truncated at min. RT), as this particular parameterization reflects the low probability of very low minimum RTs (near 0) and a steadily increasing probability for increasing times. \n\n::: {#07b852a9 .cell execution_count=14}\n``` {.julia .cell-code}\nxaxis = range(0, 0.3, 1000)\nfig = lines(xaxis, pdf.(Gamma(1.1, 11), xaxis); color=:blue, label=\"Gamma(1.1, 11)\")\nvlines!([minimum(df.RT)]; color=\"red\", linestyle=:dash, label=\"Min. RT = 0.18 s\")\naxislegend()\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=14}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Specification\n\n::: {#dbebf70c .cell execution_count=15}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_LogNormal(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors \n    τ ~ truncated(Gamma(1.1, 11); upper=min_rt)\n\n    μ_intercept ~ Normal(0, exp(1)) # On the log-scale: exp(μ) to get value in seconds\n    μ_condition ~ Normal(0, exp(0.3))\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0 # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ShiftedLogNormal(μ, σ, τ)\n    end\nend\n\nfit_LogNormal = model_LogNormal(df.RT; condition=df.Accuracy)\nchain_LogNormal = sample(fit_LogNormal, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#76460a1b .cell execution_count=16}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_LogNormal; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=16}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n parameters lower upper \n Symbol Float64 Float64 \n τ 0.1718 0.1792\n μ_intercept -1.1590 -1.1327\n μ_condition 0.3157 0.3430\n σ_intercept 0.3082 0.3228\n σ_condition 0.0327 0.0508\n \n```\n:::\n\n:::\n:::\n\n\n::: {#6a414e1f .cell execution_count=17}\n``` {.julia .cell-code}\npred = predict(model_LogNormal([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_LogNormal)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted LogNormal Model\")\nfor i in 1:length(chain_LogNormal)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=17}\n```{=html}\n \n```\n:::\n:::\n\n\nThis model provides a much better fit to the data, and confirms that the `Accuracy` condition is associated with higher RTs and higher variability (i.e., a larger distribution width).\n\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquitous in nature (and hence used as a good default) is the **Central Limit Theorem**, which states that the sum of a large number of independent random variables will be approximately normally distributed.
\n\n::: {.callout-note}\n\n### LogNormal distributions in nature\n\nThe reason why the Normal distribution is so ubiquitous in nature (and hence used as a good default) is the **Central Limit Theorem**, which states that the sum of a large number of independent random variables will be approximately normally distributed. Because many things in nature are the result of the *addition* of many random processes, the Normal distribution is very common in real life.\n\nHowever, it turns out that the multiplication of many random variables results in a **LogNormal** distribution, and multiplicative (rather than additive) cascades of processes are also very common in nature, from the lengths of latent periods of infectious diseases to the distribution of mineral resources in the Earth's crust, and the elementary mechanisms at stake in physics and cell biology [@limpert2001log].\n\nThus, using LogNormal distributions for RTs can be justified by the assumption that response times are the result of multiplicative stochastic processes happening in the brain.\n\n:::
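\n\nA quick standalone simulation (a sketch for intuition, not part of the chapter's models) illustrates both halves of this argument:\n\n```julia\nusing Random, StatsBase\n\nRandom.seed!(42)\n# Sums of many independent variables look Normal (Central Limit Theorem)\nsums = [sum(rand(100)) for _ in 1:10_000]\n# Products of many positive random factors look LogNormal (right-skewed)\nprods = [prod(1 .+ 0.1 .* randn(100)) for _ in 1:10_000]\n\nskewness(sums)   # ≈ 0: symmetric, approximately Normal\nskewness(prods)  # > 0: right-skewed, approximately LogNormal\n```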
\n\n## ExGaussian Model\n\nAnother popular model to describe RTs uses the **ExGaussian** distribution, i.e., the *Exponentially-modified Gaussian* distribution [@balota2011moving; @matzke2009psychological].\n\nThis distribution is a convolution of a normal and an exponential distribution and has three parameters, namely *mu* $\\mu$ and *sigma* $\\sigma$ - the mean and standard deviation of the Gaussian component - and *tau* $\\tau$ - the exponential component of the distribution (note that although denoted by the same letter, it does not correspond directly to a shift of the distribution).\nIntuitively, these parameters reflect the centrality, the width, and the tail dominance, respectively.\n\n![](media/rt_exgaussian.gif)\n\n\nBeyond the descriptive value of these types of models, some have tried to interpret their parameters in terms of **cognitive mechanisms**, arguing for instance that changes in the Gaussian components ($\\mu$ and $\\sigma$) reflect changes in attentional processes [e.g., \"the time required for organization and execution of the motor response\"; @hohle1965inferred], whereas changes in the exponential component ($\\tau$) reflect changes in intentional (i.e., decision-related) processes [@kieffaber2006switch].\nHowever, @matzke2009psychological demonstrated that there is likely no direct correspondence between ex-Gaussian parameters and cognitive mechanisms, and underlined their value primarily as **descriptive tools**, rather than as models of cognition *per se*.\n\nDescriptively, the three parameters can be interpreted as:\n\n- **Mu** $\\mu$ : The location / centrality of the RTs. Would correspond to the mean in a symmetrical distribution.\n- **Sigma** $\\sigma$ : The variability and dispersion of the RTs. Akin to the standard deviation in normal distributions.\n- **Tau** $\\tau$ : Tail weight / skewness of the distribution.\n\n::: {.callout-important}\nNote that these parameters are not independent with respect to distribution characteristics, such as the empirical mean and SD.\nBelow is an example of different distributions with the same location (*mu* $\\mu$) and dispersion (*sigma* $\\sigma$) parameters.\nAlthough only the tail weight parameter (*tau* $\\tau$) is changed, the whole distribution appears to shift its centre of mass.\nHence, one should be careful not to interpret the values of *mu* $\\mu$ directly as the \"mean\" or the distribution peak, and *sigma* $\\sigma$ as the SD or the \"width\".\n:::\n\n![](media/rt_exgaussian2.gif)
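\n\nThis dependence can be checked numerically with a short sketch (assuming the `ExGaussian` distribution from *SequentialSamplingModels.jl* used elsewhere in this chapter); the empirical mean of an ExGaussian is approximately $\\mu + \\tau$:\n\n```julia\nusing SequentialSamplingModels, Statistics\n\nmean(rand(ExGaussian(0.4, 0.05, 0.1), 10_000))  # ≈ 0.5, i.e., μ + τ\nmean(rand(ExGaussian(0.4, 0.05, 0.2), 10_000))  # ≈ 0.6: raising τ alone shifts the mean\n```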
\n\n### Conditional Tau $\\tau$ Parameter\n\nIn the same way as we modeled the effect of the condition on the variance component *sigma* $\\sigma$, we can do the same for any other parameter, including the exponential component *tau* $\\tau$.\nAll we need to do is set a prior on the intercept and the condition effect, and make sure that $\\tau > 0$.\n\n::: {#a7089bbe .cell execution_count=18}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_ExGaussian(rt; condition=nothing)\n\n    # Priors \n    μ_intercept ~ Normal(0, 1) \n    μ_condition ~ Normal(0, 0.3)\n\n    σ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    σ_condition ~ Normal(0, 0.1)\n\n    τ_intercept ~ truncated(Normal(0, 0.5); lower=0)\n    τ_condition ~ Normal(0, 0.1)\n\n    for i in 1:length(rt)\n        μ = μ_intercept + μ_condition * condition[i]\n        σ = σ_intercept + σ_condition * condition[i]\n        if σ < 0 # Avoid negative variance values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ <= 0 # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ ExGaussian(μ, σ, τ)\n    end\nend\n\nfit_ExGaussian = model_ExGaussian(df.RT; condition=df.Accuracy)\nchain_ExGaussian = sample(fit_ExGaussian, NUTS(), 400)\n```\n:::\n\n\n### Interpretation\n\n::: {#bf20b174 .cell execution_count=19}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_ExGaussian; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=19}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n  parameters      lower      upper \n      Symbol    Float64    Float64 \n μ_intercept     0.3999     0.4062\n μ_condition     0.0618     0.0721\n σ_intercept     0.0381     0.0432\n σ_condition     0.0104     0.0185\n τ_intercept     0.1052     0.1130\n τ_condition     0.0641     0.0795\n \n```\n:::\n\n:::\n:::\n\n\n::: {#d4d95c07 .cell execution_count=20}\n``` {.julia .cell-code}\npred = predict(model_ExGaussian([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_ExGaussian)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by ExGaussian Model\")\nfor i in 1:length(chain_ExGaussian)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=20}\n```{=html}\n \n```\n:::\n:::\n\n\nThe ExGaussian model also provides an excellent fit to the data.\nMoreover, by modeling more parameters (including *tau* $\\tau$), we can draw more nuanced conclusions.\nIn this case, the `Accuracy` condition is associated with higher RTs, higher variability, and a heavier tail (i.e., more extreme values).\n\n## Shifted Wald Model\n\nThe **Wald** distribution, also known as the **Inverse Gaussian** distribution, corresponds to the distribution of the first passage time of a Wiener process with a drift rate $\\mu$ and a diffusion rate $\\sigma$.\nWhile we will unpack this definition below and emphasize its important consequences, one can first note that it has been described as a potential model for RTs when convolved with an *exponential* distribution (in the same way that the ExGaussian distribution is a convolution of a Gaussian and an exponential distribution).\nHowever, this **Ex-Wald** model [@schwarz2001ex] was shown to be less appropriate than one of its variants, the **Shifted Wald** distribution [@heathcote2004fitting; @anders2016shifted].\n\nNote that the Wald distribution, similarly to the models that we will be covering next (the \"generative\" models), differs from the previous distributions in that it is not characterized by \"location\" and \"scale\" parameters (*mu* $\\mu$ and *sigma* $\\sigma$).\nInstead, the parameters of the Shifted Wald distribution are:\n\n- **Nu** $\\nu$ : A **drift** parameter, corresponding to the strength of the evidence accumulation process.\n- **Alpha** $\\alpha$ : A **threshold** parameter, corresponding to the amount of evidence required to make a decision.\n- **Tau** $\\tau$ : A **delay** parameter, corresponding to the non-response time (i.e., the minimum time required to process the stimulus and respond). A shift parameter similar to the one in the Shifted LogNormal model.\n\n![](media/rt_wald.gif)\n\nAs we can see, these parameters do not have a direct correspondence with the mean and standard deviation of the distribution.\nTheir interpretation is less straightforward but, as we will see below, it opens a window onto a new level of analysis.\n\n::: {.callout-note}\nExplanations regarding these new parameters will be provided in the next chapter.\n:::
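\n\nIn the meantime, a short standalone sketch (again assuming the `Wald` distribution from *SequentialSamplingModels.jl* used elsewhere in this chapter) can build some intuition: the average first-passage time is approximately $\\alpha / \\nu + \\tau$, so RTs slow down when the threshold rises or the drift weakens:\n\n```julia\nusing SequentialSamplingModels, Statistics\n\nmean(rand(Wald(3.0, 1.5, 0.2), 10_000))  # ≈ 1.5/3.0 + 0.2 = 0.7 s\nmean(rand(Wald(5.0, 1.5, 0.2), 10_000))  # ≈ 0.5 s: a stronger drift ν speeds up responses\n```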
\n\n### Model Specification\n\n::: {#4df349b0 .cell execution_count=21}\n``` {.julia .cell-code code-fold=\"false\"}\n@model function model_Wald(rt; min_rt=minimum(df.RT), condition=nothing)\n\n    # Priors \n    ν_intercept ~ truncated(Normal(1, 3); lower=0)\n    ν_condition ~ Normal(0, 1)\n\n    α_intercept ~ truncated(Normal(0, 1); lower=0)\n    α_condition ~ Normal(0, 0.5)\n\n    τ_intercept ~ truncated(Gamma(1.1, 11); upper=min_rt)\n    τ_condition ~ Normal(0, 0.01)\n\n    for i in 1:length(rt)\n        ν = ν_intercept + ν_condition * condition[i]\n        if ν <= 0 # Avoid negative drift values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        α = α_intercept + α_condition * condition[i]\n        if α <= 0 # Avoid negative threshold values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        τ = τ_intercept + τ_condition * condition[i]\n        if τ < 0 # Avoid negative tau values\n            Turing.@addlogprob! -Inf\n            return nothing\n        end\n        rt[i] ~ Wald(ν, α, τ)\n    end\nend\n\nfit_Wald = model_Wald(df.RT; condition=df.Accuracy)\nchain_Wald = sample(fit_Wald, NUTS(), 600)\n```\n:::\n\n\n::: {#b9814b26 .cell execution_count=22}\n``` {.julia .cell-code code-fold=\"false\"}\nhpd(chain_Wald; alpha=0.05)\n```\n\n::: {.cell-output .cell-output-display execution_count=22}\n\n::: {.ansi-escaped-output}\n```{=html}\nHPD\n  parameters      lower      upper \n      Symbol    Float64    Float64 \n ν_intercept     5.0986     5.3197\n ν_condition    -1.3387    -1.0493\n α_intercept     1.6605     1.7456\n α_condition     0.2060     0.3437\n τ_intercept     0.1808     0.1870\n τ_condition    -0.0371    -0.0231\n \n```\n:::\n\n:::\n:::\n\n\n::: {#cf9d1165 .cell execution_count=23}\n``` {.julia .cell-code}\npred = predict(model_Wald([(missing) for i in 1:length(df.RT)]; condition=df.Accuracy), chain_Wald)\npred = Array(pred)\n\nfig = plot_distribution(df, \"Predictions made by Shifted Wald Model\")\nfor i in 1:length(chain_Wald)\n    lines!(Makie.KernelDensity.kde(pred[:, i]), color=ifelse(df.Accuracy[i] == 1, \"#388E3C\", \"#D32F2F\"), alpha=0.1)\nend\nfig\n```\n\n::: {.cell-output .cell-output-stderr}\n```\n┌ Warning: Found `resolution` in the theme when creating a `Scene`. The `resolution` keyword for `Scene`s and `Figure`s has been deprecated. Use `Figure(; size = ...` or `Scene(; size = ...)` instead, which better reflects that this is a unitless size and not a pixel resolution. The key could also come from `set_theme!` calls or related theming functions.\n└ @ Makie C:\\Users\\domma\\.julia\\packages\\Makie\\VRavR\\src\\scenes.jl:220\n```\n:::\n\n::: {.cell-output .cell-output-display execution_count=23}\n```{=html}\n \n```\n:::\n:::\n\n\n### Model Comparison\n\nAt this stage, given the multiple options available to model RTs, you might be wondering which model is the best.\nOne can compare the models using the **Leave-One-Out Cross-Validation (LOO-CV)** method, which is a Bayesian method to estimate the out-of-sample predictive accuracy of a model.\n\n::: {#398a7d32 .cell execution_count=24}\n``` {.julia .cell-code}\nusing ParetoSmooth\n\nloo_Gaussian = psis_loo(fit_Gaussian, chain_Gaussian, source=\"mcmc\")\nloo_ScaledGaussian = psis_loo(fit_ScaledlGaussian, chain_ScaledGaussian, source=\"mcmc\")\nloo_LogNormal = psis_loo(fit_LogNormal, chain_LogNormal, source=\"mcmc\")\nloo_ExGaussian = psis_loo(fit_ExGaussian, chain_ExGaussian, source=\"mcmc\")\nloo_Wald = psis_loo(fit_Wald, chain_Wald, source=\"mcmc\")\n\nloo_compare((\n    Gaussian = loo_Gaussian, \n    ScaledGaussian = loo_ScaledGaussian, \n    LogNormal = loo_LogNormal, \n    ExGaussian = loo_ExGaussian, \n    Wald = loo_Wald))\n```\n\n::: {.cell-output .cell-output-display execution_count=24}\n```\n\n```\n:::\n\n::: {.cell-output .cell-output-stdout}\n```\n┌────────────────┬──────────┬────────┬────────┐\n│                │  cv_elpd │ cv_avg │ weight │\n├────────────────┼──────────┼────────┼────────┤\n│     ExGaussian │     0.00 │   0.00 │   1.00 │\n│      LogNormal │  -322.27 │  -0.03 │   0.00 │\n│           Wald │  -379.85 │  -0.04 │   0.00 │\n│ ScaledGaussian │ -2465.97 │  -0.26 │   0.00 │\n│       Gaussian │ -2974.49 │  -0.31 │   0.00 │\n└────────────────┴──────────┴────────┴────────┘\n```\n:::\n:::\n\n\nThe `loo_compare()` function orders models from best to worst based on their ELPD (Expected Log Pointwise Predictive Density) and reports the difference in ELPD between the best model and each of the others.\nAs one can see, traditional linear models perform terribly.\n
\n\n## Other Models\n\nOther models are available to fit RT data, which we will demonstrate below for reference purposes.\nHowever, we won't explain them here, as we will revisit them in the next chapter in the context of choice modeling.\n\n### Linear Ballistic Accumulator (LBA)\n\nTODO.\n\n### Leaky Competing Accumulator (LCA)\n\nTODO.\n\n### Racing Diffusion Model (RDM)\n\nTODO.\n\n",
"supporting": [
"4a_rt_descriptive_files\\figure-html"
],
diff --git a/content/_quarto.yml b/content/_quarto.yml
index ec92f88..12533c3 100644
--- a/content/_quarto.yml
+++ b/content/_quarto.yml
@@ -7,7 +7,7 @@ book:
author: "Dominique Makowski"
date: "03/07/2024"
cover-image: "media/cover.png"
- favicon: "media/cover_icon.png"
+ favicon: "media/cover.png"
chapters:
- index.qmd
- 1_introduction.qmd
@@ -17,7 +17,7 @@ book:
chapters:
- 4a_rt_descriptive.qmd
- 4b_rt_generative.qmd
- # - 5_individual.qmd
+ - 5_individual.qmd
# - part: "Fundamentals"
# chapters:
# - 1_introduction.qmd
diff --git a/content/index.qmd b/content/index.qmd
index fa94e73..3e16f8a 100644
--- a/content/index.qmd
+++ b/content/index.qmd
@@ -10,20 +10,10 @@ Importantly, it is currently the only language in which we can fit all the cogni
Unfortunately, cognitive models often involve distributions for which Frequentist estimations are not yet implemented, and usually contain a lot of parameters (due to the presence of **random effects**), which makes traditional algorithms fail to converge.
Simply put, the Bayesian approach is the only one currently robust enough to fit these complex models.
-## The Plan
-
-As this is a fast-evolving field (both from the theoretical - with new models being proposed - and the technical side - with improvements to the packages and the algorithms), the book needs to be future-resilient and updatable to keep up with the latest best practices.
-
-- [ ] Decide on the framework to build the book in a reproducible and collaborative manner (Quarto?)
-- [ ] Set up the infrastructure to automatically build it using GitHub actions and host it on GitHub pages
-- [ ] Write the content of the book
-- [ ] Referencing
- - Add Zenodo DOI and reference (but how to deal with evolving author? Through versioning?)
- - Publish a paper to present the book project ([JOSE](https://jose.theoj.org/))?
-
-
## Looking for Coauthors
+As this is a fast-evolving field (both from the theoretical - with new models being proposed - and the technical side - with improvements to the packages and the algorithms), the book needs to be future-resilient and updatable by contributors to keep up with the latest best practices.
+
This project can only be achieved by a team, and I suspect no single person has currently all the skills and knowledge to cover all the content. We need many people who have strengths in various aspects, such as Julia/Turing, theory, writing, making plots etc.
Most importantly, this project can serve as a way for us to learn more about this approach to psychological science.
diff --git a/content/media/animations_rt.jl b/content/media/animations_rt1.jl
similarity index 55%
rename from content/media/animations_rt.jl
rename to content/media/animations_rt1.jl
index 952d329..e3ec145 100644
--- a/content/media/animations_rt.jl
+++ b/content/media/animations_rt1.jl
@@ -135,7 +135,7 @@ m = Observable(mean(rand(ExGaussian(0.3, 0.2, 0.001), 100_000)))
fig = Figure()
ax = Axis(
fig[1, 1],
- title=@lift("Wald(μ = $(round($μ, digits = 1)), σ = $(round($σ, digits = 2)), τ = $(round($τ, digits = 3)))"),
+ title=@lift("ExGaussian(μ = $(round($μ, digits = 1)), σ = $(round($σ, digits = 2)), τ = $(round($τ, digits = 3)))"),
xlabel="RT (s)",
ylabel="Distribution",
yticksvisible=false,
@@ -233,215 +233,3 @@ end
frames = range(0, 1, length=120)
record(make_animation, fig, "rt_wald.gif", frames; framerate=30)
-
-# Random Walk =====================================================================================
-
-# Functions
-function random_walk(n)
- x = zeros(n)
- for i in 2:n
- x[i] = x[i-1] + rand([-1, 1])
- end
- return x
-end
-
-function generate_trace(n, i=1)
- y = random_walk(n)
- x = range(0, 0.7, length=n)
- return DataFrame(x=x, y=y, iteration=i)
-end
-
-# Animation settings
-n_frames = 240
-frames = range(0, 1, length=n_frames)
-
-# Make trace data
-df = DataFrame(x=Float64[], y=Float64[], iteration=Int64[])
-for i in 1:40
- df = vcat(df, generate_trace(200, i))
-end
-df.Frame = repeat(frames, inner=Int(ceil(nrow(df) / n_frames)))[1:nrow(df)]
-
-# Find crossing point
-df_crossings = DataFrame(y=Float64[], iteration=Int64[], Frame=Float64[])
-for iter in unique(df.iteration)
- data = df[df.iteration.==iter, :]
- # Find y when x closest to 0.7
- idx = argmin(abs.(data.x .- 0.7))
- push!(df_crossings, (data.y[idx], iter, data.Frame[idx]))
-end
-
-# Density
-density_points = Observable([0])
-density_alpha = Observable(0)
-
-# Initialize the figure
-fig = Figure()
-ax1 = Axis(
- fig[1, 1],
- title="Random Walk",
- xlabel="Time",
- # ylabel="Evidence",
- yticksvisible=false,
- xticksvisible=false,
- yticklabelsvisible=false
-)
-xlims!(ax1, 0, 0.73)
-ylims!(ax1, -40, 40)
-colsize!(fig.layout, 1, Relative(2 / 3))
-hidespines!(ax1, :r)
-
-ax2 = Axis(
- fig[1, 2],
- title="Crossing Distribution",
- # xlabel="Time",
- # ylabel="Evidence",
- yticksvisible=false,
- xticksvisible=false,
- yticklabelsvisible=false,
- xticklabelsvisible=false
-)
-ylims!(ax2, -40, 40)
-xlims!(ax2; low=0)
-hidespines!(ax2, :l)
-colgap!(fig.layout, 1, 0.1)
-
-
-points = Dict()
-for iter in unique(df.iteration)
- points["i"*string(iter)] = Observable(Point2f[(0, 0)])
-end
-
-for iter in unique(df.iteration)
- lines!(ax1, points["i"*string(iter)], color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=0.9)[iter])
-end
-vlines!(ax1, 0.7, color=:red, linewidth=2, linestyle=:dash)
-density!(ax2, density_points, npoints=1000, direction=:y,
- color=@lift((:orange, $density_alpha)))
-fig
-
-function make_animation(frame)
- # Trace lines
- data = df[df.Frame.==frame, :]
- for row in eachrow(data)
- iter = row.iteration
- new_point = Point2f(row.x, row.y)
- points["i"*string(iter)][] = push!(points["i"*string(iter)][], new_point)
- end
-
- # Cross points
- cross = df_crossings[df_crossings.Frame.==frame, :]
- if nrow(cross) > 0
- # ax1
- lines!(ax1, [0.7, 0.73], [cross.y[1], cross.y[1]],
- color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=1)[cross.iteration[1]])
- # ax2
- density_alpha[] = 1
- scatter!(ax2, 0.0, cross.y[1], markersize=20,
- color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=0.8)[cross.iteration[1]])
- density_points[] = push!(density_points[], cross.y[1])
- end
-end
-
-# animation settings
-record(make_animation, fig, "rt_randomwalk.gif", frames; framerate=30)
-
-
-
-# Walk Generation =====================================================================================
-
-
-using DataFrames
-using SequentialSamplingModels
-
-α = Observable(1.5)
-τ = Observable(0.05)
-ν = Observable(3.0)
-
-# Initialize the figure
-fig = Figure()
-ax1 = Axis(
- fig[1, 1],
- title=@lift("Wald(ν = $(round($ν, digits = 1)), α = $(round($α, digits = 1)), τ = $(round($τ, digits = 2)))"),
- # xlabel="Time",
- ylabel="Distribution",
- yticksvisible=false,
- xticksvisible=false,
- yticklabelsvisible=false,
- xticklabelsvisible=false
-)
-hidespines!(ax1, :b)
-xlims!(ax1; low=0, high=1.5)
-ylims!(ax1; low=0, high=3.5)
-
-ax2 = Axis(
- fig[2, 1],
- # title="Density",
- xlabel="Time",
- ylabel="Evidence",
- yticksvisible=false,
- xticksvisible=false,
- # ygridvisible=false,
- # yticklabelsvisible=false,
- # xticklabelsvisible=false
-)
-hidespines!(ax2, :t)
-xlims!(ax2; low=0, high=1.5)
-ylims!(ax2; low=-0.5, high=2.5)
-rowgap!(fig.layout, 1, 0.1)
-
-
-# Traces
-function make_points(ν=4, α=1.5, τ=0.2, max_time=1500)
- trace = simulate(Wald(ν, α, τ); Δt=0.001)[2]
-
- x = τ .+ range(0, 0.001 * length(trace), length=length(trace))
- x = collect(x)
-
- points = [(i, j) for (i, j) in zip(x, trace)]
- return Point2f.(points)
-end
-
-for iter in 1:40
- lines!(ax2, @lift(make_points($ν, $α, $τ)),
- color=cgrad(:viridis, 40; categorical=true, alpha=0.8)[iter],
- linewidth=0.5)
-end
-
-# Rest
-xaxis = range(0, 1.5, length=1000)
-lines!(ax1, xaxis, @lift(pdf.(Wald($ν, $α, $τ), xaxis)), color=:orange)
-lines!(ax2, @lift([0, $τ]), [0, 0], color=:red, linewidth=2)
-lines!(ax2, [0, 1.5], @lift([$α, $α]), color=:red, linestyle=:dash)
-lines!(ax2, @lift([$τ, $τ + 1 / 4]), @lift([0, $ν / 4]), color=:black)
-lines!(ax2, @lift([$τ, $τ + 1 / 4]), [0, 0], color=:black, linestyle=:dash)
-
-
-
-fig
-
-
-function make_animation(frame)
- if frame < 0.15
- τ[] = change_param(frame; frame_range=(0, 0.15), param_range=(0.05, 0.2))
- end
- if frame >= 0.25 && frame < 0.40
- α[] = change_param(frame; frame_range=(0.25, 0.40), param_range=(1.5, 2.4))
- end
- if frame >= 0.45 && frame < 0.65
- ν[] = change_param(frame; frame_range=(0.45, 0.65), param_range=(3.0, 5.0))
- end
- # Return to normal
- if frame >= 0.7 && frame < 0.85
- α[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(2.4, 1.5))
- τ[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(0.2, 0.05))
- ν[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(5.0, 3.0))
- end
-end
-
-# animation settings
-frames = range(0, 1, length=90)
-record(make_animation, fig, "rt_wald2.gif", frames; framerate=20)
-
-
-
diff --git a/content/media/animations_rt2.jl b/content/media/animations_rt2.jl
new file mode 100644
index 0000000..e641136
--- /dev/null
+++ b/content/media/animations_rt2.jl
@@ -0,0 +1,316 @@
+using CSV
+using DataFrames
+using Distributions
+using SequentialSamplingModels
+using GLMakie
+using Downloads
+using Random
+
+# Data ==========================================================================================
+cd(@__DIR__)
+df = CSV.read(Downloads.download("https://raw.githubusercontent.com/DominiqueMakowski/CognitiveModels/main/data/wagenmakers2008.csv"), DataFrame)
+
+
+function rescale_param(p; original_range=(-1, 1), new_range=(-3, 3))
+ p = (p - original_range[1]) / (original_range[2] - original_range[1])
+ p = p * (new_range[2] - new_range[1]) + new_range[1]
+ return p
+end
+
+function change_param(frame; frame_range=(0, 1), param_range=(0, 1))
+ frame = rescale_param(frame; original_range=frame_range, new_range=(1π, 2π))
+ p = rescale_param(cos(frame); original_range=(-1, 1), new_range=param_range)
+ return p
+end
+
+
+# Random Walk =====================================================================================
+
+# Functions
+function random_walk(n)
+ x = zeros(n)
+ for i in 2:n
+ x[i] = x[i-1] + rand([-1, 1])
+ end
+ return x
+end
+
+function generate_trace(n, i=1)
+ y = random_walk(n)
+ x = range(0, 0.7, length=n)
+ return DataFrame(x=x, y=y, iteration=i)
+end
+
+# Animation settings
+n_frames = 240
+frames = range(0, 1, length=n_frames)
+
+# Make trace data
+df = DataFrame(x=Float64[], y=Float64[], iteration=Int64[])
+for i in 1:40
+ df = vcat(df, generate_trace(200, i))
+end
+df.Frame = repeat(frames, inner=Int(ceil(nrow(df) / n_frames)))[1:nrow(df)]
+
+# Find crossing point
+df_crossings = DataFrame(y=Float64[], iteration=Int64[], Frame=Float64[])
+for iter in unique(df.iteration)
+ data = df[df.iteration.==iter, :]
+ # Find y when x closest to 0.7
+ idx = argmin(abs.(data.x .- 0.7))
+ push!(df_crossings, (data.y[idx], iter, data.Frame[idx]))
+end
+
+# Density
+density_points = Observable([0])
+density_alpha = Observable(0)
+
+# Initialize the figure
+fig = Figure()
+ax1 = Axis(
+ fig[1, 1],
+ title="Random Walk",
+ xlabel="Time",
+ # ylabel="Evidence",
+ yticksvisible=false,
+ xticksvisible=false,
+ yticklabelsvisible=false
+)
+xlims!(ax1, 0, 0.73)
+ylims!(ax1, -40, 40)
+colsize!(fig.layout, 1, Relative(2 / 3))
+hidespines!(ax1, :r)
+
+ax2 = Axis(
+ fig[1, 2],
+ title="Crossing Distribution",
+ # xlabel="Time",
+ # ylabel="Evidence",
+ yticksvisible=false,
+ xticksvisible=false,
+ yticklabelsvisible=false,
+ xticklabelsvisible=false
+)
+ylims!(ax2, -40, 40)
+xlims!(ax2; low=0)
+hidespines!(ax2, :l)
+colgap!(fig.layout, 1, 0.1)
+
+
+points = Dict()
+for iter in unique(df.iteration)
+ points["i"*string(iter)] = Observable(Point2f[(0, 0)])
+end
+
+for iter in unique(df.iteration)
+ lines!(ax1, points["i"*string(iter)], color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=0.9)[iter])
+end
+vlines!(ax1, 0.7, color=:red, linewidth=2, linestyle=:dash)
+density!(ax2, density_points, npoints=1000, direction=:y,
+ color=@lift((:orange, $density_alpha)))
+fig
+
+function make_animation(frame)
+ # Trace lines
+ data = df[df.Frame.==frame, :]
+ for row in eachrow(data)
+ iter = row.iteration
+ new_point = Point2f(row.x, row.y)
+ points["i"*string(iter)][] = push!(points["i"*string(iter)][], new_point)
+ end
+
+ # Cross points
+ cross = df_crossings[df_crossings.Frame.==frame, :]
+ if nrow(cross) > 0
+ # ax1
+ lines!(ax1, [0.7, 0.73], [cross.y[1], cross.y[1]],
+ color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=1)[cross.iteration[1]])
+ # ax2
+ density_alpha[] = 1
+ scatter!(ax2, 0.0, cross.y[1], markersize=20,
+ color=cgrad(:viridis, length(unique(df.iteration)); categorical=true, alpha=0.8)[cross.iteration[1]])
+ density_points[] = push!(density_points[], cross.y[1])
+ end
+end
+
+# animation settings
+record(make_animation, fig, "rt_randomwalk.gif", frames; framerate=30)
+
+
+
+# Walk Generation =====================================================================================
+
+
+using DataFrames
+using SequentialSamplingModels
+
+α = Observable(1.5)
+τ = Observable(0.05)
+ν = Observable(3.0)
+
+# Initialize the figure
+fig = Figure()
+ax1 = Axis(
+ fig[1, 1],
+ title=@lift("Wald(ν = $(round($ν, digits = 1)), α = $(round($α, digits = 1)), τ = $(round($τ, digits = 2)))"),
+ # xlabel="Time",
+ ylabel="Distribution",
+ yticksvisible=false,
+ xticksvisible=false,
+ yticklabelsvisible=false,
+ xticklabelsvisible=false
+)
+hidespines!(ax1, :b)
+xlims!(ax1; low=0, high=1.5)
+ylims!(ax1; low=0, high=3.5)
+
+ax2 = Axis(
+ fig[2, 1],
+ # title="Density",
+ xlabel="Time",
+ ylabel="Evidence",
+ yticksvisible=false,
+ xticksvisible=false,
+ # ygridvisible=false,
+ # yticklabelsvisible=false,
+ # xticklabelsvisible=false
+)
+hidespines!(ax2, :t)
+xlims!(ax2; low=0, high=1.5)
+ylims!(ax2; low=-0.5, high=2.5)
+rowgap!(fig.layout, 1, 0.1)
+
+
+# Traces
+function make_points(ν=4, α=1.5, τ=0.2)
+ trace = simulate(Wald(ν, α, τ); Δt=0.001)[2]
+
+ x = τ .+ range(0, 0.001 * length(trace), length=length(trace))
+ x = collect(x)
+
+ points = [(i, j) for (i, j) in zip(x, trace)]
+ return Point2f.(points)
+end
+
+for iter in 1:40
+ lines!(ax2, @lift(make_points($ν, $α, $τ)),
+ color=cgrad(:viridis, 40; categorical=true, alpha=0.8)[iter],
+ linewidth=0.5)
+end
+
+# Rest
+xaxis = range(0, 1.5, length=1000)
+lines!(ax1, xaxis, @lift(pdf.(Wald($ν, $α, $τ), xaxis)), color=:orange)
+lines!(ax2, @lift([0, $τ]), [0, 0], color=:red, linewidth=2)
+lines!(ax2, [0, 1.5], @lift([$α, $α]), color=:red, linestyle=:dash)
+lines!(ax2, @lift([$τ, $τ + 1 / 4]), @lift([0, $ν / 4]), color=:black)
+lines!(ax2, @lift([$τ, $τ + 1 / 4]), [0, 0], color=:black, linestyle=:dash)
+
+
+
+fig
+
+
+function make_animation(frame)
+ if frame < 0.15
+ τ[] = change_param(frame; frame_range=(0, 0.15), param_range=(0.05, 0.2))
+ end
+ if frame >= 0.25 && frame < 0.40
+ α[] = change_param(frame; frame_range=(0.25, 0.40), param_range=(1.5, 2.4))
+ end
+ if frame >= 0.45 && frame < 0.65
+ ν[] = change_param(frame; frame_range=(0.45, 0.65), param_range=(3.0, 5.0))
+ end
+ # Return to normal
+ if frame >= 0.7 && frame < 0.85
+ α[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(2.4, 1.5))
+ τ[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(0.2, 0.05))
+ ν[] = change_param(frame; frame_range=(0.7, 0.85), param_range=(5.0, 3.0))
+ end
+end
+
+# animation settings
+frames = range(0, 1, length=90)
+record(make_animation, fig, "rt_wald2.gif", frames; framerate=20)
+
+
+# DDM =====================================================================================
+
+Random.seed!(123)
+
+ν = Observable(1.0)
+α = Observable(1.0)
+τ = Observable(0.05)
+z = Observable(0.5)
+# yorigin = z[] * α[]
+
+function make_points(ν, α, z, τ)
+ x, y = simulate(DDM(ν, α, z, τ); Δt=0.001)
+
+ x = τ .+ x
+
+ points = [(i, j) for (i, j) in zip(x, y)]
+ return Point2f.(points)
+end
+
+# Initialize the figure
+fig = Figure()
+ax1 = Axis(
+ fig[1, 1],
+ title=@lift("Drift Diffusion Model (ν = $(round($ν, digits = 1)), α = $(round($α, digits = 1)), z = $(round($z, digits = 1)), τ = $(round($τ, digits = 2)))"),
+ xlabel="Time",
+ ylabel="Evidence",
+ yticksvisible=false,
+ xticksvisible=false,
+)
+
+for iter in 1:10
+ lines!(ax1, @lift(make_points($ν, $α, $z, $τ)), color=(:black, 0.5))
+end
+
+
+
+x = range(-0.1, 1.1, length=1000)
+
+lines!(ax1, x, @lift($α .+ pdf.(DDM($ν, $α, $z, $τ), 1, x)), color=:green)
+lines!(ax1, x, @lift(-pdf.(DDM($ν, $α, $z, $τ), 0, x)), color=:red)
+
+fig
+
+# Bounds
+hlines!(ax1, @lift([$α]), color=:green, linestyle=:dash, label="Correct")
+hlines!(ax1, [0], color=:red, linestyle=:dash, label="Incorrect")
+
+# Slope
+lines!(ax1, @lift([$τ, $τ + 1 / 6]), @lift([$z * $α, $z * $α + $ν / 6]); color=:orange, linewidth=4)
+
+# Starting
+scatter!(ax1, @lift([0, $τ]), @lift([$z * $α, $z * $α]), color=:purple, markersize=10)
+lines!(ax1, @lift([0, $τ]), @lift([$z * $α, $z * $α]), color=:purple)
+axislegend("Answer"; position=:rt)
+
+function make_animation(frame)
+ if frame < 0.1
+ τ[] = change_param(frame; frame_range=(0, 0.1), param_range=(0.05, 0.2))
+ end
+ if frame >= 0.2 && frame < 0.3
+ α[] = change_param(frame; frame_range=(0.2, 0.3), param_range=(1, 2))
+ end
+ if frame >= 0.4 && frame < 0.5
+ ν[] = change_param(frame; frame_range=(0.4, 0.5), param_range=(1, 4))
+ end
+ if frame >= 0.6 && frame < 0.70
+ z[] = change_param(frame; frame_range=(0.6, 0.70), param_range=(0.5, 0.1))
+ end
+ # Return to normal
+ if frame >= 0.8 && frame < 0.85
+ τ[] = change_param(frame; frame_range=(0.8, 0.85), param_range=(0.2, 0.05))
+ α[] = change_param(frame; frame_range=(0.8, 0.85), param_range=(2, 1))
+ ν[] = change_param(frame; frame_range=(0.8, 0.85), param_range=(4, 1))
+ z[] = change_param(frame; frame_range=(0.8, 0.85), param_range=(0.1, 0.5))
+ end
+end
+
+# animation settings
+frames = range(0, 1, length=120)
+record(make_animation, fig, "rt_ddm.gif", frames; framerate=15)
\ No newline at end of file
diff --git a/content/media/animations_scales.jl b/content/media/animations_scales.jl
index 634ba05..56d49ee 100644
--- a/content/media/animations_scales.jl
+++ b/content/media/animations_scales.jl
@@ -23,13 +23,53 @@ end
# BetaMod =======================================================================================
+using Turing, Distributions, Random
# Reparameterized Beta distribution
-function BetaMod(μ, σ)
- α = μ * (μ * (1 - μ) / σ^2 - 1)
- β = α * (1 - μ) / μ
+function MeanVarBeta(μ, σ²)
+ if σ² <= 0 || σ² >= μ * (1 - μ)
+ error("Variance σ² must be in the interval (0, μ*(1-μ)=$(μ*(1-μ))).")
+ end
+
+ ν = μ * (1 - μ) / σ² - 1
+ α = μ * ν
+ β = (1 - μ) * ν
+
return Beta(α, β)
end
+var(MeanVarBeta(0.3, 0.1))
+mean(MeanVarBeta(0.3, 0.1))
+
+
+# Range of possible parameters
+fig = Figure()
+ax = Axis(fig[1, 1], xlabel="μ", ylabel="variance σ²")
+for μ in range(0.001, 1, length=200)
+ for σ in range(0, 1, length=200)
+ x = range(0, 1, length=100)
+ try
+ y = pdf.(MeanVarBeta(μ, σ), x)
+ scatter!(ax, μ, σ, color=:red)
+ catch
+ continue
+ end
+ end
+end
+ylims!(ax, 0, 0.5)
+ablines!(ax, [0, 1], [1, -1]; color=:black)
+fig
+
+
+@model function model_Beta(x)
+ μ ~ truncated(Beta(1, 1), 0.3, 0.7)
+ σ ~ Uniform(0.05, 0.15)
+    x .~ MeanVarBeta(μ, σ)  # likelihood: observe each data point
+end
+chains = sample(model_Beta(rand(MeanVarBeta(0.5, 0.1), 100)), NUTS(), 300)
+
+
+
+
μ = Observable(0.5)
σ = Observable(0.1)
@@ -48,7 +88,7 @@ ax = Axis(
ylims!(ax, 0, 10)
x = range(0, 1, length=100)
-y = @lift(pdf.(BetaMod($μ, $σ), x))
+y = @lift(pdf.(MeanVarBeta($μ, $σ), x))
lines!(ax, x, y)
fig
@@ -72,5 +112,5 @@ end
# animation settings
frames = range(0, 1, length=120)
-record(make_animation, fig, "scales_betamod.gif", frames; framerate=20)
+record(make_animation, fig, "scales_MeanVarBeta.gif", frames; framerate=20)
diff --git a/content/media/cover.png b/content/media/cover.png
index ab20b5a..508a588 100644
Binary files a/content/media/cover.png and b/content/media/cover.png differ
diff --git a/content/media/cover.psd b/content/media/cover.psd
index f08a484..dbc67ed 100644
Binary files a/content/media/cover.psd and b/content/media/cover.psd differ
diff --git a/content/media/cover_icon.png b/content/media/cover_icon.png
deleted file mode 100644
index 0a50a80..0000000
Binary files a/content/media/cover_icon.png and /dev/null differ
diff --git a/content/media/rt_ddm.gif b/content/media/rt_ddm.gif
new file mode 100644
index 0000000..e4bbcc0
Binary files /dev/null and b/content/media/rt_ddm.gif differ
diff --git a/content/media/rt_exgaussian2.gif b/content/media/rt_exgaussian2.gif
index 2f3d8f4..025e23f 100644
Binary files a/content/media/rt_exgaussian2.gif and b/content/media/rt_exgaussian2.gif differ