Optimization In Multiple Dimensions in C# QuickStart Sample

Illustrates unconstrained optimization in multiple dimensions using the optimizer classes in the Extreme.Mathematics.Optimization namespace in C#.


using System;
// The optimization classes reside in the
// Extreme.Mathematics.Optimization namespace.
using Extreme.Mathematics.Optimization;
// Function delegates reside in the Extreme.Mathematics
// namespace.
using Extreme.Mathematics;
// Vectors reside in the Extreme.Mathematics.LinearAlgebra
// namespace.
using Extreme.Mathematics.LinearAlgebra;

namespace Extreme.Numerics.QuickStart.CSharp
{
    /// <summary>
    /// Illustrates unconstrained optimization in multiple dimensions
    /// using classes in the Extreme.Mathematics.Optimization 
    /// namespace of Extreme Numerics.NET.
    /// </summary>
    class OptimizationInND
    {
        static void Main(string[] args)
        {
            // The license is verified at runtime. We're using
            // a demo license here. For more information, see
            // https://numerics.net/trial-key
            Extreme.License.Verify("Demo license");
            //
            // Objective function
            //

            // The objective function must be supplied as a
            // Func<Vector<double>, double> delegate. This is a method 
            // that takes one Vector<double> argument and returns a real number.
            // See the end of this sample for definitions of the methods 
            // that are referenced here.
            Func<Vector<double>, double> f = fRosenbrock;

            // The gradient of the objective function can be supplied either
            // as a Func<Vector<double>, Vector<double>> delegate, or as a
            // Func<Vector<double>, Vector<double>, Vector<double>> delegate. The former takes
            // one vector argument and returns a vector. The latter
            // takes a second vector argument, which is an existing
            // vector that is used to return the result.
            Func<Vector<double>, Vector<double>, Vector<double>> g = gRosenbrock;
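            // As an alternative, a one-argument gradient delegate that
            // allocates a new vector on every call could be built from the
            // same method, for example (illustrative only, not used below):
            //     Func<Vector<double>, Vector<double>> gAlloc = x => gRosenbrock(x, null);
            // The two-argument form used here lets the optimizer reuse an
            // existing output vector and avoid that allocation.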

            // The initial values are supplied as a vector:
            var initialGuess = Vector.Create(-1.2, 1);
            // The actual solution is [1, 1].

            //
            // Quasi-Newton methods: BFGS and DFP
            //

            // For most purposes, the quasi-Newton methods give
            // excellent results. There are two variations: DFP and
            // BFGS. The latter gives slightly better results.

            // Which method is used is specified by a constructor
            // parameter of type QuasiNewtonMethod:
            var bfgs = new QuasiNewtonOptimizer(QuasiNewtonMethod.Bfgs);
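            // For illustration, the DFP variant would be constructed the
            // same way; the enum member name is assumed here:
            //     var dfp = new QuasiNewtonOptimizer(QuasiNewtonMethod.Dfp);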

            bfgs.InitialGuess = initialGuess;
            bfgs.ExtremumType = ExtremumType.Minimum;

            // Set the ObjectiveFunction:
            bfgs.ObjectiveFunction = f;
            // Set either the GradientFunction or FastGradientFunction:
            bfgs.FastGradientFunction = g;
            // The FindExtremum method does all the hard work:
            bfgs.FindExtremum();

            Console.WriteLine("BFGS Method:");
            Console.WriteLine("  Solution: {0}", bfgs.Extremum);
            Console.WriteLine("  Estimated error: {0}", bfgs.EstimatedError);
            Console.WriteLine("  # iterations: {0}", bfgs.IterationsNeeded);
            // Optimizers return the number of function evaluations
            // and the number of gradient evaluations needed:
            Console.WriteLine("  # function evaluations: {0}", bfgs.EvaluationsNeeded);
            Console.WriteLine("  # gradient evaluations: {0}", bfgs.GradientEvaluationsNeeded);

            //
            // Conjugate Gradient methods
            //

            // Conjugate gradient methods exist in three variants:
            // Fletcher-Reeves, Polak-Ribiere, and positive Polak-Ribiere.

            // Which method is used is specified by a constructor
            // parameter of type ConjugateGradientMethod:
            var cg = 
                new ConjugateGradientOptimizer(ConjugateGradientMethod.PositivePolakRibiere);
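            // For illustration, the other variants would be selected with
            // ConjugateGradientMethod.FletcherReeves or
            // ConjugateGradientMethod.PolakRibiere (member names assumed;
            // only the positive Polak-Ribiere variant is used here).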
            // Everything else works as before:
            cg.ObjectiveFunction = f;
            cg.FastGradientFunction = g;
            cg.InitialGuess = initialGuess;
            cg.FindExtremum();

            Console.WriteLine("Conjugate Gradient Method:");
            Console.WriteLine("  Solution: {0}", cg.Extremum);
            Console.WriteLine("  Estimated error: {0}", cg.EstimatedError);
            Console.WriteLine("  # iterations: {0}", cg.IterationsNeeded);
            Console.WriteLine("  # function evaluations: {0}", cg.EvaluationsNeeded);
            Console.WriteLine("  # gradient evaluations: {0}", cg.GradientEvaluationsNeeded);

            //
            // Powell's method
            //

            // Powell's method is a conjugate direction method that
            // does not require the derivative of the objective function.
            // It is implemented by the PowellOptimizer class:
            var pw = new PowellOptimizer();
            pw.InitialGuess = initialGuess;
            // Powell's method does not use derivatives:
            pw.ObjectiveFunction = f;
            pw.FindExtremum();

            Console.WriteLine("Powell's Method:");
            Console.WriteLine("  Solution: {0}", pw.Extremum);
            Console.WriteLine("  Estimated error: {0}", pw.EstimatedError);
            Console.WriteLine("  # iterations: {0}", pw.IterationsNeeded);
            Console.WriteLine("  # function evaluations: {0}", pw.EvaluationsNeeded);
            Console.WriteLine("  # gradient evaluations: {0}", pw.GradientEvaluationsNeeded);

            //
            // Nelder-Mead method
            //

            // Also called the downhill simplex method, the method of Nelder 
            // and Mead is useful for functions that are not tractable 
            // by other methods. For example, other methods
            // may fail if the objective function is not continuous.
            // However, it is much slower than the other methods.

            // The method is implemented by the NelderMeadOptimizer class:
            var nm = new NelderMeadOptimizer();

            // The class has three special properties that control
            // the progress of the algorithm. These parameters have
            // default values and need not be set explicitly.
            nm.ContractionFactor = 0.5;
            nm.ExpansionFactor = 2;
            nm.ReflectionFactor = -2;
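            // Roughly speaking: the reflection factor controls how far the
            // simplex is reflected away from its worst point, the expansion
            // factor how far a successful reflection is stretched, and the
            // contraction factor how much the simplex shrinks toward its
            // best point. (These are the usual Nelder-Mead conventions,
            // not specific claims about this implementation.)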

            // Everything else is the same.
            nm.SolutionTest.AbsoluteTolerance = 1e-15;
            nm.InitialGuess = initialGuess;
            // The method does not use derivatives:
            nm.ObjectiveFunction = f;
            nm.FindExtremum();

            Console.WriteLine("Nelder-Mead Method:");
            Console.WriteLine("  Solution: {0}", nm.Extremum);
            Console.WriteLine("  Estimated error: {0}", nm.EstimatedError);
            Console.WriteLine("  # iterations: {0}", nm.IterationsNeeded);
            Console.WriteLine("  # function evaluations: {0}", nm.EvaluationsNeeded);

            Console.Write("Press Enter key to exit...");
            Console.ReadLine();
        }

        // The famous Rosenbrock test function.
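        // In formula form: f(x) = (1 - x[0])^2 + 105*(x[1] - x[0]^2)^2,
        // a variant of the classic Rosenbrock function (which uses 100
        // instead of 105). Its global minimum f = 0 is at x = (1, 1).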
        static double fRosenbrock(Vector<double> x)
        {
            double p = (1-x[0]);
            double q = x[1] - x[0]*x[0];
            return p*p + 105 * q*q;
        }

        // Gradient of the Rosenbrock test function.
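        // Differentiating f(x) = (1 - x[0])^2 + 105*(x[1] - x[0]^2)^2 gives
        //   df/dx[0] = -2*(1 - x[0]) - 420*x[0]*(x[1] - x[0]^2)
        //   df/dx[1] =  210*(x[1] - x[0]^2)
        // which is what this method computes.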
        static Vector<double> gRosenbrock(Vector<double> x, Vector<double> f)
        {
            // Always assume that the second argument may be null:
            if (f == null)
                f = Vector.Create<double>(2);
            double p = (1-x[0]);
            double q = x[1] - x[0]*x[0];
            f[0] = -2*p - 420*x[0]*q;
            f[1] = 210*q;
            return f;
        }
    }
}