diff --git a/HISTORY.md b/HISTORY.md
index 4813230f7..16b720f94 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -3,6 +3,9 @@
  * Make a few tests more robust
    ([#228](https://github.com/mlpack/ensmallen/pull/228)).
 
+ * Add Nelder-Mead optimizer
+   ([#231](https://github.com/mlpack/ensmallen/pull/231)).
+
 ### ensmallen 2.14.2: "No Direction Home"
 ###### 2020-08-31
  * Fix implementation of fonesca fleming problem function f1 and f2 
diff --git a/doc/optimizers.md b/doc/optimizers.md
index a99151421..0b874e386 100644
--- a/doc/optimizers.md
+++ b/doc/optimizers.md
@@ -1246,7 +1246,7 @@ can be paired with the `Lookahead` optimizer.
 | `BaseOptimizerType` | **`baseOptimizer`** |  Optimizer for the forward step. | Adam |
 | `double` | **`stepSize`** | Step size for each iteration. | `0.5` |
 | `size_t` | **`k`** | The synchronization period. | `5` |
-| `size_t` | **`max_iterations`** | Maximum number of iterations allowed (0 means no limit). | `100000` |
+| `size_t` | **`maxIterations`** | Maximum number of iterations allowed (0 means no limit). | `100000` |
 | `double` | **`tolerance`** | Maximum absolute tolerance to terminate algorithm. | `1e-5` |
 | `DecayPolicyType` | **`decayPolicy`** | Instantiated decay policy used to adjust the step size. | `DecayPolicyType()` |
 | `bool` | **`exactObjective`** | Calculate the exact objective (Default: estimate the final objective obtained on the last pass over the data). | `false` |
@@ -1278,6 +1278,61 @@ optimizer.Optimize(f, coordinates);
  * [Lookahead Optimizer: k steps forward, 1 step back](https://arxiv.org/abs/1907.08610)
  * [Differentiable separable functions](#differentiable-separable-functions)
 
+## Nelder-Mead
+
+*An optimizer for [arbitrary functions](#arbitrary-functions).*
+
+The Nelder-Mead (NM) method (also called the downhill simplex method) is a
+heuristic search method for minimizing an objective function in an
+N-dimensional space. The key concept of the method is the simplex, an
+N-dimensional polytope that is the convex hull of a set of N + 1 affinely
+independent points. The method is an iterative process that transforms an
+initial simplex into a sequence of simplices, following a path in the space
+along which the function values are progressively reduced.
+
+#### Constructors
+
+ * `NelderMead()`
+ * `NelderMead(`_`maxIterations, tolerance, simplexer`_`)`
+ * `NelderMead(`_`alpha, beta, gamma, delta, maxIterations, tolerance, simplexer`_`)`
+
+#### Attributes
+
+| **type** | **name** | **description** | **default** |
+|----------|----------|-----------------|-------------|
+| `double` | **`alpha`** | The reflection parameter. | `1` (adaptive) |
+| `double` | **`beta`** | The expansion parameter. | `1 + 2 / n` (adaptive) |
+| `double` | **`gamma`** | The contraction parameter. | `0.75 - 0.5 / n` (adaptive) |
+| `double` | **`delta`** | The shrink step parameter. | `1 - 1 / n` (adaptive) |
+| `size_t` | **`maxIterations`** | Maximum number of iterations allowed (0 means no limit). | `100000` |
+| `double` | **`tolerance`** | Maximum absolute difference between the best and worst function values in the simplex allowed before termination. If set to a negative value, the tolerance is not considered. | `1e-15` |
+| `SimplexerType` | **`simplexer`** | The simplex policy used to construct the initial simplex. | `AffineSimplexer` |
+
+Attributes of the optimizer may also be changed via the member methods
+`Alpha()`, `Beta()`, `Gamma()`, `Delta()`, `MaxIterations()`,
+`Tolerance()` and `Simplexer()`.
+
+#### Examples:
+
+<details open>
+<summary>Click to collapse/expand example code.
+</summary>
+
+```c++
+RosenbrockFunction f;
+arma::mat coordinates = f.GetInitialPoint();
+
+NelderMead optimizer;
+optimizer.Optimize(f, coordinates);
+```
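+
+By default the reflection, expansion, contraction, and shrink parameters are
+set adaptively based on the problem dimensionality `n`; they can also be given
+explicitly via the full constructor, as in the sketch below (the parameter
+values are purely illustrative, not recommended settings):
+
+```c++
+RosenbrockFunction f;
+arma::mat coordinates = f.GetInitialPoint();
+
+// alpha, beta, gamma, delta, maxIterations, tolerance.
+NelderMead customOptimizer(1.0, 2.0, 0.5, 0.5, 500, 1e-10);
+customOptimizer.Tolerance() = 1e-12;  // Parameters can also be changed later.
+customOptimizer.Optimize(f, coordinates);
+```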
+
+</details>
+
+#### See also:
+
+ * [Nelder-Mead in Wikipedia](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method)
+ * [Arbitrary functions](#arbitrary-functions)
+
 ## LRSDP (low-rank SDP solver)
 
 *An optimizer for [semidefinite programs](#semidefinite-programs).*
diff --git a/include/ensmallen.hpp b/include/ensmallen.hpp
index 018e0f2a7..cfdfd1eb3 100644
--- a/include/ensmallen.hpp
+++ b/include/ensmallen.hpp
@@ -103,6 +103,7 @@
 #include "ensmallen_bits/parallel_sgd/parallel_sgd.hpp"
 #include "ensmallen_bits/pso/pso.hpp"
 #include "ensmallen_bits/rmsprop/rmsprop.hpp"
+#include "ensmallen_bits/nelder_mead/nelder_mead.hpp"
 
 #include "ensmallen_bits/sa/sa.hpp"
 #include "ensmallen_bits/sarah/sarah.hpp"
diff --git a/include/ensmallen_bits/nelder_mead/affine_simplexer.hpp b/include/ensmallen_bits/nelder_mead/affine_simplexer.hpp
new file mode 100644
index 000000000..a53b247c2
--- /dev/null
+++ b/include/ensmallen_bits/nelder_mead/affine_simplexer.hpp
@@ -0,0 +1,60 @@
+/**
+ * @file affine_simplexer.hpp
+ * @author Marcus Edel
+ *
+ * Definition of a simple affine simplex to ensure that the representation of
+ * points in the affine hull is unique.
+ *
+ * ensmallen is free software; you may redistribute it and/or modify it under
+ * the terms of the 3-clause BSD license.  You should have received a copy of
+ * the 3-clause BSD license along with ensmallen.  If not, see
+ * http://www.opensource.org/licenses/BSD-3-Clause for more information.
+ */
+#ifndef ENSMALLEN_NELDER_MEAD_AFFINE_SIMPLEXER_HPP
+#define ENSMALLEN_NELDER_MEAD_AFFINE_SIMPLEXER_HPP
+
+namespace ens {
+
+/*
+ * Definition of the affine simplexer.
+ */
+class AffineSimplexer
+{
+ public:
+  /**
+   * Construct the initial simplex.  A simplex is represented by an
+   * n x (n + 1) matrix whose columns are the (n + 1) n-dimensional vertices.
+   * The simplex is built around the given initial point by offsetting that
+   * point along scaled coordinate directions.
+   *
+   * @param simplex Constructed simplex.
+   * @param iterate Initial starting point.
+   * @param function Function to optimize.
+   */
+  template<typename FunctionType, typename MatType>
+  void Simplex(MatType& simplex,
+               const MatType& iterate,
+               FunctionType& /* function */)
+  {
+    // Convenience typedefs.
+    typedef typename MatType::elem_type ElemType;
+
+    // Construct the initial simplex.  The scaling factor is clamped to
+    // [1, 10] based on the magnitude of the initial point, so a reasonably
+    // large initial simplex is used.
+    const ElemType scalingFactor = std::min(std::max(
+        arma::as_scalar(arma::max(
+        arma::abs(iterate))), (ElemType) 1.0), (ElemType) 10.0);
+
+    simplex = arma::eye<MatType>(iterate.n_rows, iterate.n_rows + 1);
+    simplex.col(iterate.n_rows) = (1.0 -
+        std::sqrt((ElemType) (iterate.n_rows + 1))) /
+        (ElemType) iterate.n_rows * arma::ones<MatType>(iterate.n_rows, 1);
+
+    simplex *= scalingFactor;
+    simplex.each_col() += iterate;
+  }
+};
+
+} // namespace ens
+
+#endif
diff --git a/include/ensmallen_bits/nelder_mead/nelder_mead.hpp b/include/ensmallen_bits/nelder_mead/nelder_mead.hpp
new file mode 100644
index 000000000..00dee1c87
--- /dev/null
+++ b/include/ensmallen_bits/nelder_mead/nelder_mead.hpp
@@ -0,0 +1,234 @@
+/**
+ * @file nelder_mead.hpp
+ * @author Marcus Edel
+ *
+ * Definition of the Nelder-Mead optimizer with adaptive parameters as
+ * proposed by F. Gao and L. Han in "Implementing the Nelder-Mead simplex
+ * algorithm with adaptive parameters".
+ *
+ * ensmallen is free software; you may redistribute it and/or modify it under
+ * the terms of the 3-clause BSD license.  You should have received a copy of
+ * the 3-clause BSD license along with ensmallen.  If not, see
+ * http://www.opensource.org/licenses/BSD-3-Clause for more information.
+ */
+#ifndef ENSMALLEN_NELDER_MEAD_HPP
+#define ENSMALLEN_NELDER_MEAD_HPP
+
+#include "affine_simplexer.hpp"
+
+namespace ens {
+
+/**
+ * Nelder-Mead is a direct search method. It keeps track of the function value
+ * at a number of points in the search space and iteratively generates a
+ * sequence of simplices to approximate an optimal point.
+ *
+ * For more information, please refer to:
+ *
+ * @code
+ * @article{Nelder1965ASM,
+ *   author  = {J. Nelder and R. Mead},
+ *   title   = {A Simplex Method for Function Minimization},
+ *   year    = {1965},
+ *   journal = {Comput. J.},
+ *   volume  = {7},
+ *   pages   = {308-313}
+ * }
+ * @endcode
+ *
+ * @code
+ * @article{Fuchang2012,
+ *   author    = {Gao, Fuchang and Han, Lixing},
+ *   title     = {Implementing the Nelder-Mead Simplex Algorithm with Adaptive
+ *                Parameters},
+ *   year      = {2012},
+ *   publisher = {Kluwer Academic Publishers},
+ *   volume    = {51},
+ *   number    = {1},
+ *   journal   = {Comput. Optim. Appl.},
+ *   month     = jan,
+ *   pages     = {259–277}
+ * }
+ * @endcode
+ *
+ * NelderMead can optimize arbitrary functions.  For more details, see the
+ * documentation on function types included with this distribution or on the
+ * ensmallen website.
+ *
+ * @tparam SimplexerType Simplex policy used by Nelder-Mead to construct the
+ *     initial simplex.
+ */
+template<typename SimplexerType = AffineSimplexer>
+class NelderMeadType
+{
+ public:
+  /**
+   * Construct the NelderMead optimizer with the default adaptive parameters
+   * scheme as used in "Implementing the Nelder-Mead simplex algorithm with
+   * adaptive parameters" by F. Gao and L. Han (2010). These are based on the
+   * dimensionality of the problem, and are given by
+   *
+   * alpha = 1
+   * beta = 1 + 2 / n
+   * gamma = 0.75 - 0.5 / n
+   * delta = 1 - 1 / n
+   *
+   * where n is the dimensionality of the problem.
+   *
+   * The defaults here are not necessarily good for the given problem, so it is
+   * suggested that the values used be tailored to the task at hand.  The
+   * maximum number of iterations refers to the maximum number of simplex
+   * transformations (i.e., one iteration equals one reflection, expansion,
+   * contraction, or shrink step).
+   *
+   * @param maxIterations Maximum number of iterations allowed (0 means no
+   *     limit).
+   * @param tolerance Maximum absolute tolerance to terminate algorithm.
+   * @param simplexer The simplex policy used to construct the initial simplex.
+   */
+  NelderMeadType(const size_t maxIterations = 100000,
+                 const double tolerance = 1e-15,
+                 const SimplexerType& simplexer = SimplexerType());
+  /**
+   * Construct the NelderMead optimizer with the given parameters. The defaults
+   * here are not necessarily good for the given problem, so it is suggested
+   * that the values used be tailored to the task at hand.  The maximum number
+   * of iterations refers to the maximum number of simplex transformations
+   * (i.e., one iteration equals one reflection, expansion, contraction, or
+   * shrink step).
+   *
+   * @param alpha The reflection parameter.
+   * @param beta The expansion parameter.
+   * @param gamma The contraction parameter.
+   * @param delta The shrink step parameter.
+   * @param maxIterations Maximum number of iterations allowed (0 means no
+   *     limit).
+   * @param tolerance Maximum absolute tolerance to terminate algorithm.
+   * @param simplexer The simplex policy used to construct the initial simplex.
+   */
+  NelderMeadType(const double alpha,
+                 const double beta,
+                 const double gamma,
+                 const double delta,
+                 const size_t maxIterations = 100000,
+                 const double tolerance = 1e-15,
+                 const SimplexerType& simplexer = SimplexerType());
+
+  /**
+   * Optimize the given function using NelderMead. The given starting point will
+   * be modified to store the finishing point of the algorithm, and the final
+   * objective value is returned.
+   *
+   * @tparam ArbitraryFunctionType Type of the function to be optimized.
+   * @tparam MatType Type of matrix to optimize.
+   * @tparam CallbackTypes Types of callback functions.
+   * @param function Function to optimize.
+   * @param iterate Starting point (will be modified).
+   * @param callbacks Callback functions.
+   * @return Objective value of the final point.
+   */
+  template<typename ArbitraryFunctionType,
+           typename MatType,
+           typename... CallbackTypes>
+  typename MatType::elem_type Optimize(ArbitraryFunctionType& function,
+                                       MatType& iterate,
+                                       CallbackTypes&&... callbacks);
+
+  //! Get the reflection parameter.
+  double Alpha() const { return alpha; }
+  //! Modify the reflection parameter.
+  double& Alpha() { return alpha; }
+
+  //! Get the expansion parameter.
+  double Beta() const { return beta; }
+  //! Modify the expansion parameter.
+  double& Beta() { return beta; }
+
+  //! Get the contraction parameter.
+  double Gamma() const { return gamma; }
+  //! Modify the contraction parameter.
+  double& Gamma() { return gamma; }
+
+  //! Get the shrink step parameter.
+  double Delta() const { return delta; }
+  //! Modify the shrink step parameter.
+  double& Delta() { return delta; }
+
+  //! Get the maximum number of iterations (0 indicates no limit).
+  size_t MaxIterations() const { return maxIterations; }
+  //! Modify the maximum number of iterations (0 indicates no limit).
+  size_t& MaxIterations() { return maxIterations; }
+
+  //! Get the tolerance for termination.
+  double Tolerance() const { return tolerance; }
+  //! Modify the tolerance for termination.
+  double& Tolerance() { return tolerance; }
+
+  //! Get the simplexer policy.
+  const SimplexerType& Simplexer() const { return simplexer; }
+  //! Modify the simplexer policy.
+  SimplexerType& Simplexer() { return simplexer; }
+
+ private:
+  /**
+   * Helper function to shift the given order based on the given function
+   * objective.
+   *
+   * @param order The current order of the function objectives.
+   * @param fx The current function values.
+   * @param value Function value to be inserted into the current order.
+   * @return Position of the given objective in the given order.
+   */
+  size_t ShiftOrder(arma::uvec& order,
+                    const arma::rowvec& fx,
+                    const double value)
+  {
+    // Figure out at which index we have to put the new value.
+    size_t i = 0;
+    for (; i < fx.n_elem; ++i)
+      if (fx(order(i)) > value) break;
+
+    // Shift every value after 'i' by one to the right.
+    const size_t tmp = order(fx.n_elem - 1);
+    for (size_t d = fx.n_elem - 1; d > i; --d)
+      order(d) = order(d - 1);
+    order(i) = tmp;
+
+    return i;
+  }
+
+  //! The reflection parameter.
+  double alpha;
+
+  //! The expansion parameter.
+  double beta;
+
+  //! The contraction parameter.
+  double gamma;
+
+  //! The shrink step parameter.
+  double delta;
+
+  //! The maximum number of allowed iterations.
+  size_t maxIterations;
+
+  //! The tolerance for termination.
+  double tolerance;
+
+  //! Locally stored simplexer.
+  SimplexerType simplexer;
+
+  //! Whether to use the default adaptive parameters from the "Implementing the
+  //! Nelder-Mead simplex algorithm with adaptive parameters" paper.
+  bool adaptiveDefault;
+};
+
+// Convenience typedef.
+using NelderMead = NelderMeadType<AffineSimplexer>;
+
+} // namespace ens
+
+// Include implementation.
+#include "nelder_mead_impl.hpp"
+
+#endif
diff --git a/include/ensmallen_bits/nelder_mead/nelder_mead_impl.hpp b/include/ensmallen_bits/nelder_mead/nelder_mead_impl.hpp
new file mode 100644
index 000000000..5f18d55fb
--- /dev/null
+++ b/include/ensmallen_bits/nelder_mead/nelder_mead_impl.hpp
@@ -0,0 +1,241 @@
+/**
+ * @file nelder_mead_impl.hpp
+ * @author Marcus Edel
+ *
+ * Implementation of the Nelder-Mead optimizer with adaptive parameters as
+ * proposed by F. Gao and L. Han in "Implementing the Nelder-Mead simplex
+ * algorithm with adaptive parameters".
+ *
+ * ensmallen is free software; you may redistribute it and/or modify it under
+ * the terms of the 3-clause BSD license.  You should have received a copy of
+ * the 3-clause BSD license along with ensmallen.  If not, see
+ * http://www.opensource.org/licenses/BSD-3-Clause for more information.
+ */
+#ifndef ENSMALLEN_NELDER_MEAD_IMPL_HPP
+#define ENSMALLEN_NELDER_MEAD_IMPL_HPP
+
+// In case it hasn't been included yet.
+#include "nelder_mead.hpp"
+
+#include <ensmallen_bits/function.hpp>
+
+namespace ens {
+
+template<typename SimplexerType>
+inline NelderMeadType<SimplexerType>::NelderMeadType(
+    const size_t maxIterations,
+    const double tolerance,
+    const SimplexerType& simplexer) :
+    alpha(0),
+    beta(0),
+    gamma(0),
+    delta(0),
+    maxIterations(maxIterations),
+    tolerance(tolerance),
+    simplexer(simplexer),
+    adaptiveDefault(true)
+{ /* Nothing to do. */ }
+
+template<typename SimplexerType>
+inline NelderMeadType<SimplexerType>::NelderMeadType(
+    const double alpha,
+    const double beta,
+    const double gamma,
+    const double delta,
+    const size_t maxIterations,
+    const double tolerance,
+    const SimplexerType& simplexer) :
+    alpha(alpha),
+    beta(beta),
+    gamma(gamma),
+    delta(delta),
+    maxIterations(maxIterations),
+    tolerance(tolerance),
+    simplexer(simplexer),
+    adaptiveDefault(false)
+{ /* Nothing to do. */ }
+
+//! Optimize the function.
+template<typename SimplexerType>
+template<typename ArbitraryFunctionType,
+         typename MatType,
+         typename... CallbackTypes>
+typename MatType::elem_type NelderMeadType<SimplexerType>::Optimize(
+    ArbitraryFunctionType& function,
+    MatType& iterate,
+    CallbackTypes&&... callbacks)
+{
+  // Convenience typedefs.
+  typedef typename MatType::elem_type ElemType;
+  typedef typename MatTypeTraits<MatType>::BaseMatType BaseMatType;
+
+  // Make sure that we have the methods that we need.  Long name...
+  traits::CheckArbitraryFunctionTypeAPI<ArbitraryFunctionType,
+      BaseMatType>();
+  RequireDenseFloatingPointType<BaseMatType>();
+
+  // Controls early termination of the optimization process.
+  bool terminate = false;
+
+  // Controls the shrink step.
+  bool shrink = false;
+
+  // Problem dimensionality.
+  const size_t dim = iterate.n_rows;
+
+  // Use the default adaptive parameter scheme as used in "Implementing the
+  // Nelder-Mead simplex algorithm with adaptive parameters" by F. Gao and L.
+  // Han (2012).
+  if (adaptiveDefault)
+  {
+    alpha = 1;
+    beta = 1.0 + 2.0 / (double) dim;
+    gamma = 0.75 - 0.5 / (double) dim;
+    delta = 1.0 - 1.0 / (double) dim;
+  }
+
+  // Construct the initial simplex.
+  MatType simplex;
+  simplexer.Simplex(simplex, iterate, function);
+
+  arma::rowvec fx(dim + 1);
+  for (size_t i = 0; i < dim + 1; ++i)
+  {
+    fx(i) = function.Evaluate(simplex.col(i));
+    Callback::Evaluate(*this, function, simplex.col(i), fx(i), callbacks...);
+  }
+
+  // Get the indices that correspond to the ordering of the function values
+  // at the vertices. order(0) is the index in the simplex of the vertex
+  // with the lowest function value, and order(dim) is the index in the
+  // simplex of the vertex with the highest function value.
+  arma::uvec order = arma::sort_index(fx);
+
+  terminate |= Callback::BeginOptimization(*this, function, iterate,
+      callbacks...);
+  for (size_t i = 1; i != maxIterations && !terminate; ++i)
+  {
+    const MatType M = arma::mean(simplex.cols(order.subvec(0, dim - 1)), 1);
+    const MatType xref = (1.0 + alpha) * M - alpha * simplex.col(order(dim));
+    const ElemType fref = function.Evaluate(xref);
+    Callback::Evaluate(*this, function, xref, fref, callbacks...);
+
+    if (fref < fx(order(0)))
+    {
+      // Compute expansion.
+      const MatType xexp = (1.0 + alpha * beta) * M - alpha * beta *
+          simplex.col(order(dim));
+      const ElemType fexp = function.Evaluate(xexp);
+      Callback::Evaluate(*this, function, xexp, fexp, callbacks...);
+
+      // Shift values by one to the right and swap the last with the first.
+      const size_t fLowest = order(dim);
+      for (size_t d = dim; d > 0; --d)
+        order(d) = order(d - 1);
+      order(0) = fLowest;
+
+      if (fexp < fref)
+      {
+        simplex.col(order(0)) = xexp;
+        fx(order(0)) = fexp;
+      }
+      else
+      {
+        simplex.col(order(0)) = xref;
+        fx(order(0)) = fref;
+      }
+    }
+    else if (fref < fx(order(dim - 1)))
+    {
+      // Accept reflection point.
+      const size_t s = ShiftOrder(order, fx, fref);
+      simplex.col(order(s)) = xref;
+      fx(order(s)) = fref;
+    }
+    else
+    {
+      if (fref < fx(order(dim)))
+      {
+        // Outside contraction.
+        const MatType xoc = (1.0 + alpha * gamma) * M - alpha * gamma *
+            simplex.col(order(dim));
+        const ElemType foc = function.Evaluate(xoc);
+        Callback::Evaluate(*this, function, xoc, foc, callbacks...);
+
+        if (foc <= fref)
+        {
+          const size_t s = ShiftOrder(order, fx, foc);
+          simplex.col(order(s)) = xoc;
+          fx(order(s)) = foc;
+        }
+        else
+        {
+          shrink = true;
+        }
+      }
+      else
+      {
+        // Inside contraction.
+        const MatType xic = (1.0 - gamma) * M + gamma *
+            simplex.col(order(dim));
+        const ElemType fic = function.Evaluate(xic);
+        Callback::Evaluate(*this, function, xic, fic, callbacks...);
+
+        if (fic < fx(order(dim)))
+        {
+          const size_t s = ShiftOrder(order, fx, fic);
+          simplex.col(order(s)) = xic;
+          fx(order(s)) = fic;
+        }
+        else
+        {
+          shrink = true;
+        }
+      }
+    }
+
+    if (shrink)
+    {
+      // Shrink every vertex towards the best vertex, except the best vertex
+      // itself (order(0)).
+      for (size_t d = 1; d <= dim; ++d)
+      {
+        simplex.col(order(d)) = simplex.col(order(0)) + delta *
+            (simplex.col(order(d)) - simplex.col(order(0)));
+        fx(order(d)) = function.Evaluate(simplex.col(order(d)));
+
+        Callback::Evaluate(*this, function, simplex.col(order(d)),
+            fx(order(d)), callbacks...);
+      }
+
+      // Re-sorting can be slow; however, there is overwhelming evidence that
+      // shrink transformations almost never happen in practice. See "Efficient
+      // Implementation of the Nelder–Mead Search Algorithm" by S. Singer and
+      // S. Singer.  All subsequent accesses go through 'order', so it is
+      // enough to recompute the ordering here.
+      order = arma::sort_index(fx);
+
+      shrink = false;
+    }
+
+    // Check for termination criteria.
+    if (std::abs(fx(order(dim)) - fx(order(0))) < tolerance)
+    {
+      Info << "NelderMead: minimized within tolerance " << tolerance << "; "
+           << "terminating optimization." << std::endl;
+      break;
+    }
+  }
+
+  // Set the best candidate (the vertex with the lowest function value).
+  iterate = simplex.col(order(0));
+
+  const ElemType objective = function.Evaluate(iterate);
+  Callback::Evaluate(*this, function, iterate, objective, callbacks...);
+
+  Callback::EndOptimization(*this, function, iterate, callbacks...);
+  return objective;
+}
+
+} // namespace ens
+
+#endif
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 6b55d4a4a..5770cb645 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -24,6 +24,7 @@ set(ENSMALLEN_TESTS_SOURCES
     lookahead_test.cpp
     lrsdp_test.cpp
     momentum_sgd_test.cpp
+    nelder_mead_test.cpp
     nesterov_momentum_sgd_test.cpp
     nsga2_test.cpp
     parallel_sgd_test.cpp
@@ -59,4 +60,4 @@ add_custom_command(TARGET ensmallen_tests
 
 enable_testing()
 add_test(NAME ensmallen_tests COMMAND ensmallen_tests
-WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
\ No newline at end of file
+WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
diff --git a/tests/nelder_mead_test.cpp b/tests/nelder_mead_test.cpp
new file mode 100644
index 000000000..643d8e691
--- /dev/null
+++ b/tests/nelder_mead_test.cpp
@@ -0,0 +1,87 @@
+/**
+ * @file nelder_mead_test.cpp
+ * @author Marcus Edel
+ *
+ * ensmallen is free software; you may redistribute it and/or modify it under
+ * the terms of the 3-clause BSD license.  You should have received a copy of
+ * the 3-clause BSD license along with ensmallen.  If not, see
+ * http://www.opensource.org/licenses/BSD-3-Clause for more information.
+ */
+
+#include <ensmallen.hpp>
+#include "catch.hpp"
+
+using namespace ens;
+using namespace ens::test;
+
+/**
+ * Tests the Nelder-Mead optimizer using the Rosenbrock Function.
+ */
+TEST_CASE("NMRosenbrockFunctionTest", "[NelderMeadTest]")
+{
+  RosenbrockFunction f;
+  NelderMead nm(300, 1e-15);
+
+  arma::mat coords = f.GetInitialPoint();
+  nm.Optimize(f, coords);
+
+  double finalValue = f.Evaluate(coords);
+
+  REQUIRE(finalValue == Approx(0.0).margin(1e-5));
+  REQUIRE(coords(0) == Approx(1.0).epsilon(1e-7));
+  REQUIRE(coords(1) == Approx(1.0).epsilon(1e-7));
+}
+
+/**
+ * Test the Nelder-Mead optimizer using an arma::fmat with the Rosenbrock
+ * function.
+ */
+TEST_CASE("NMRosenbrockFunctionFloatTest", "[NelderMeadTest]")
+{
+  RosenbrockFunction f;
+  NelderMead nm(300, 1e-15);
+
+  arma::fmat coords = f.GetInitialPoint<arma::fvec>();
+  nm.Optimize(f, coords);
+
+  float finalValue = f.Evaluate(coords);
+
+  REQUIRE(finalValue == Approx(0.0f).margin(1e-3));
+  REQUIRE(coords(0) == Approx(1.0f).epsilon(1e-4));
+  REQUIRE(coords(1) == Approx(1.0f).epsilon(1e-4));
+}
+
+/**
+ * Tests the Nelder–Mead optimizer using the Colville Function.
+ */
+TEST_CASE("NMColvilleFunctionTest", "[NelderMeadTest]")
+{
+  ColvilleFunction f;
+  NelderMead nm(500, 1e-15);
+
+  arma::mat coords = f.GetInitialPoint();
+  nm.Optimize(f, coords);
+
+  REQUIRE(coords(0) == Approx(1.0).epsilon(1e-7));
+  REQUIRE(coords(1) == Approx(1.0).epsilon(1e-7));
+}
+
+/**
+ * Tests the Nelder–Mead optimizer using the Wood Function.
+ */
+TEST_CASE("NMWoodFunctionTest", "[NelderMeadTest]")
+{
+  WoodFunction f;
+  NelderMead nm(800, 1e-15);
+
+  arma::mat coords = f.GetInitialPoint();
+  nm.Optimize(f, coords);
+
+  double finalValue = f.Evaluate(coords);
+
+  REQUIRE(finalValue == Approx(0.0).margin(1e-5));
+  REQUIRE(coords(0) == Approx(1.0).epsilon(1e-7));
+  REQUIRE(coords(1) == Approx(1.0).epsilon(1e-7));
+  REQUIRE(coords(2) == Approx(1.0).epsilon(1e-7));
+  REQUIRE(coords(3) == Approx(1.0).epsilon(1e-7));
+}