R1ToRd.java

package org.drip.function.definition;

/*
 * -*- mode: java; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 */

/*!
 * Copyright (C) 2020 Lakshmi Krishnamurthy
 * Copyright (C) 2019 Lakshmi Krishnamurthy
 * Copyright (C) 2018 Lakshmi Krishnamurthy
 * Copyright (C) 2017 Lakshmi Krishnamurthy
 * Copyright (C) 2016 Lakshmi Krishnamurthy
 * Copyright (C) 2015 Lakshmi Krishnamurthy
 *
 *  This file is part of DROP, an open-source library targeting analytics/risk, transaction cost analytics,
 *      asset liability management analytics, capital, exposure, and margin analytics, valuation adjustment
 *      analytics, and portfolio construction analytics within and across fixed income, credit, commodity,
 *      equity, FX, and structured products. It also includes auxiliary libraries for algorithm support,
 *      numerical analysis, numerical optimization, spline builder, model validation, statistical learning,
 *      and computational support.
 *
 *      https://lakshmidrip.github.io/DROP/
 *
 *  DROP is composed of three modules:
 *
 *  - DROP Product Core - https://lakshmidrip.github.io/DROP-Product-Core/
 *  - DROP Portfolio Core - https://lakshmidrip.github.io/DROP-Portfolio-Core/
 *  - DROP Computational Core - https://lakshmidrip.github.io/DROP-Computational-Core/
 *
 *  DROP Product Core implements libraries for the following:
 *  - Fixed Income Analytics
 *  - Loan Analytics
 *  - Transaction Cost Analytics
 *
 *  DROP Portfolio Core implements libraries for the following:
 *  - Asset Allocation Analytics
 *  - Asset Liability Management Analytics
 *  - Capital Estimation Analytics
 *  - Exposure Analytics
 *  - Margin Analytics
 *  - XVA Analytics
 *
 *  DROP Computational Core implements libraries for the following:
 *  - Algorithm Support
 *  - Computation Support
 *  - Function Analysis
 *  - Model Validation
 *  - Numerical Analysis
 *  - Numerical Optimizer
 *  - Spline Builder
 *  - Statistical Learning
 *
 *  Documentation for DROP is Spread Over:
 *
 *  - Main                     => https://lakshmidrip.github.io/DROP/
 *  - Wiki                     => https://github.com/lakshmiDRIP/DROP/wiki
 *  - GitHub                   => https://github.com/lakshmiDRIP/DROP
 *  - Repo Layout Taxonomy     => https://github.com/lakshmiDRIP/DROP/blob/master/Taxonomy.md
 *  - Javadoc                  => https://lakshmidrip.github.io/DROP/Javadoc/index.html
 *  - Technical Specifications => https://github.com/lakshmiDRIP/DROP/tree/master/Docs/Internal
 *  - Release Versions         => https://lakshmidrip.github.io/DROP/version.html
 *  - Community Credits        => https://lakshmidrip.github.io/DROP/credits.html
 *  - Issues Catalog           => https://github.com/lakshmiDRIP/DROP/issues
 *  - JUnit                    => https://lakshmidrip.github.io/DROP/junit/index.html
 *  - Jacoco                   => https://lakshmidrip.github.io/DROP/jacoco/index.html
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *      you may not use this file except in compliance with the License.
 *
 *  You may obtain a copy of the License at
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *      distributed under the License is distributed on an "AS IS" BASIS,
 *      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 *  See the License for the specific language governing permissions and
 *      limitations under the License.
 */

/**
 * <i>R1ToRd</i> provides the evaluation of the R<sup>1</sup> To R<sup>d</sup> Objective Function and its
 * derivatives for a specified variate. The default implementations of the derivatives are for
 * non-analytical, black-box objective functions. A minimal usage sketch appears after the class body below.
 *
 *  <br><br>
 *  <ul>
 *      <li><b>Module </b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/ComputationalCore.md">Computational Core Module</a></li>
 *      <li><b>Library</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/NumericalAnalysisLibrary.md">Numerical Analysis Library</a></li>
 *      <li><b>Project</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/function/README.md">R<sup>d</sup> To R<sup>d</sup> Function Analysis</a></li>
 *      <li><b>Package</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/function/definition/README.md">Function Implementation Ancillary Support Objects</a></li>
 *  </ul>
 *
 * @author Lakshmi Krishnamurthy
 */

public abstract class R1ToRd {
    private static final int QUADRATURE_SAMPLING = 10000;

    protected org.drip.numerical.differentiation.DerivativeControl _dc = null;

    protected R1ToRd (
        final org.drip.numerical.differentiation.DerivativeControl dc)
    {
        // Fall back to the default Derivative Control when none is supplied

        if (null == (_dc = dc)) _dc = new org.drip.numerical.differentiation.DerivativeControl();
    }

    /**
     * Evaluate for the given Input R^1 Variate
     *
     * @param dblVariate The Input R^1 Variate
     *
     * @return The Output R^d Array
     */

    public abstract double[] evaluate (
        final double dblVariate);

    /**
     * Calculate the Array of Differentials
     *
     * @param dblVariate Variate at which the derivative is to be calculated
     * @param iOrder Order of the derivative to be computed
     *
     * @return The Array of Differentials
     */

    public org.drip.numerical.differentiation.Differential[] differential (
        final double dblVariate,
        final int iOrder)
    {
        if (!org.drip.numerical.common.NumberUtil.IsValid (dblVariate) || 0 >= iOrder) return null;

        int iOutputDimension = -1;
        double[] adblDerivative = null;
        double dblOrderedVariateInfinitesimal = 1.;
        double dblVariateInfinitesimal = java.lang.Double.NaN;

        try {
            dblVariateInfinitesimal = _dc.getVariateInfinitesimal (dblVariate);
        } catch (java.lang.Exception e) {
            e.printStackTrace();

            return null;
        }

        for (int i = 0; i <= iOrder; ++i) {
            if (0 != i) dblOrderedVariateInfinitesimal *= (2. * dblVariateInfinitesimal);

            // Central Difference Stencil Node centered at the Input Variate

            double dblVariateIncremental = dblVariate + dblVariateInfinitesimal * (iOrder - 2. * i);

            double[] adblValue = evaluate (dblVariateIncremental);

            if (null == adblValue || 0 == (iOutputDimension = adblValue.length)) return null;

            if (null == adblDerivative) {
                adblDerivative = new double[iOutputDimension];

                for (int j = 0; j < iOutputDimension; ++j)
                    adblDerivative[j] = 0.;
            }

            // Accumulate the Alternating, Binomially Weighted Function Values

            try {
                for (int j = 0; j < iOutputDimension; ++j)
                    adblDerivative[j] += (i % 2 == 0 ? 1 : -1) * org.drip.numerical.common.NumberUtil.NCK
                        (iOrder, i) * adblValue[j];
            } catch (java.lang.Exception e) {
                e.printStackTrace();

                return null;
            }
        }

        org.drip.numerical.differentiation.Differential[] aDiff = new
            org.drip.numerical.differentiation.Differential[iOutputDimension];

        try {
            for (int j = 0; j < iOutputDimension; ++j)
                aDiff[j] = new org.drip.numerical.differentiation.Differential (dblOrderedVariateInfinitesimal,
                    adblDerivative[j]);
        } catch (java.lang.Exception e) {
            e.printStackTrace();

            return null;
        }

        return aDiff;
    }
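
    /*
     * The loop above assembles the standard central finite difference approximation, per output component j:
     *
     *      f_j^(n) (x) ~ [ SUM_{i = 0 .. n} (-1)^i C (n, i) f_j (x + (n - 2 i) h) ] / (2 h)^n
     *
     *  where n = iOrder and h = the Variate Infinitesimal supplied by the Derivative Control. The (2 h)^n
     *  scale is not applied here; it is carried as the Ordered Variate Infinitesimal inside the returned
     *  Differential instances.
     */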

    /**
     * Calculate the Derivative as a double Array
     *
     * @param dblVariate Variate at which the derivative is to be calculated
     * @param iOrder Order of the derivative to be computed
     *
     * @return The Derivative Array
     */

    public double[] derivative (
        final double dblVariate,
        final int iOrder)
    {
        org.drip.numerical.differentiation.Differential[] aDiff = differential (dblVariate, iOrder);

        if (null == aDiff) return null;

        int iOutputDimension = aDiff.length;

        if (0 == iOutputDimension) return null;

        double[] adblDerivative = new double[iOutputDimension];

        // Extract the Slope of each Component Differential

        for (int i = 0; i < iOutputDimension; ++i)
            adblDerivative[i] = aDiff[i].calcSlope (true);

        return adblDerivative;
    }

    /**
     * Integrate over the given Input Range Using Uniform Monte-Carlo
     *
     * @param dblLeftEdge Input Left Edge
     * @param dblRightEdge Input Right Edge
     *
     * @return The Array Containing the Result of the Integration over the specified Range
     */

    public double[] integrate (
        final double dblLeftEdge,
        final double dblRightEdge)
    {
        if (!org.drip.numerical.common.NumberUtil.IsValid (dblLeftEdge) ||
            !org.drip.numerical.common.NumberUtil.IsValid (dblRightEdge) || dblRightEdge <= dblLeftEdge)
            return null;

        int iOutputDimension = -1;
        double[] adblIntegrand = null;
        double dblVariateWidth = dblRightEdge - dblLeftEdge;

        // Accumulate the Function Values at Uniformly Drawn Variates inside the Range

        for (int i = 0; i < QUADRATURE_SAMPLING; ++i) {
            double[] adblValue = evaluate (dblLeftEdge + java.lang.Math.random() * dblVariateWidth);

            if (null == adblValue || 0 == (iOutputDimension = adblValue.length)) return null;

            if (null == adblIntegrand) adblIntegrand = new double[iOutputDimension];

            for (int j = 0; j < iOutputDimension; ++j)
                adblIntegrand[j] += adblValue[j];
        }

        // Scale the Accumulated Sum by the Range Width over the Sample Count

        for (int i = 0; i < iOutputDimension; ++i)
            adblIntegrand[i] *= (dblVariateWidth / QUADRATURE_SAMPLING);

        return adblIntegrand;
    }
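
    /*
     * Note on the estimator above: with variates drawn uniformly from [dblLeftEdge, dblRightEdge], the
     *  sample average of the function values, scaled by the Range Width, converges to the integral over the
     *  Range; the fixed QUADRATURE_SAMPLING draw count leaves the usual O (1 / sqrt (QUADRATURE_SAMPLING))
     *  Monte-Carlo noise in the estimate.
     */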
}
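
/*
 * Minimal usage sketch; illustrative only, and not part of the DROP API. It relies solely on the members
 *  defined above (evaluate, derivative, integrate, and the default DerivativeControl fallback). The mapping
 *  x -> [x^2, sin x] and the class name R1ToRdUsageSketch are hypothetical, introduced here purely as an
 *  example of implementing the abstract evaluate and exercising the black-box derivative and integrator.
 */

class R1ToRdUsageSketch
{
    public static void main (
        final String[] astrArgs)
    {
        // Concrete R^1 -> R^2 Objective Function: x -> [x^2, sin x]

        R1ToRd r1ToR2 = new R1ToRd (null)
        {
            @Override public double[] evaluate (
                final double dblVariate)
            {
                return new double[]
                {
                    dblVariate * dblVariate,
                    java.lang.Math.sin (dblVariate)
                };
            }
        };

        double[] adblValue = r1ToR2.evaluate (0.50);

        double[] adblDerivative = r1ToR2.derivative (0.50, 1);

        double[] adblIntegrand = r1ToR2.integrate (0., 1.);

        System.out.println ("Value      => [" + adblValue[0] + ", " + adblValue[1] + "]");

        System.out.println ("Derivative => [" + adblDerivative[0] + ", " + adblDerivative[1] + "]");

        System.out.println ("Integral   => [" + adblIntegrand[0] + ", " + adblIntegrand[1] + "]");
    }
}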