MonotoneConvexHaganWest.java

package org.drip.spline.pchip;

/*
 * -*- mode: java; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 */

/*!
 * Copyright (C) 2020 Lakshmi Krishnamurthy
 * Copyright (C) 2019 Lakshmi Krishnamurthy
 * Copyright (C) 2018 Lakshmi Krishnamurthy
 * Copyright (C) 2017 Lakshmi Krishnamurthy
 * Copyright (C) 2016 Lakshmi Krishnamurthy
 * Copyright (C) 2015 Lakshmi Krishnamurthy
 * Copyright (C) 2014 Lakshmi Krishnamurthy
 * Copyright (C) 2013 Lakshmi Krishnamurthy
 *
 *  This file is part of DROP, an open-source library targeting analytics/risk, transaction cost analytics,
 *      asset liability management analytics, capital, exposure, and margin analytics, valuation adjustment
 *      analytics, and portfolio construction analytics within and across fixed income, credit, commodity,
 *      equity, FX, and structured products. It also includes auxiliary libraries for algorithm support,
 *      numerical analysis, numerical optimization, spline builder, model validation, statistical learning,
 *      and computational support.
 *
 *      https://lakshmidrip.github.io/DROP/
 *
 *  DROP is composed of three modules:
 *
 *  - DROP Product Core - https://lakshmidrip.github.io/DROP-Product-Core/
 *  - DROP Portfolio Core - https://lakshmidrip.github.io/DROP-Portfolio-Core/
 *  - DROP Computational Core - https://lakshmidrip.github.io/DROP-Computational-Core/
 *
 *  DROP Product Core implements libraries for the following:
 *  - Fixed Income Analytics
 *  - Loan Analytics
 *  - Transaction Cost Analytics
 *
 *  DROP Portfolio Core implements libraries for the following:
 *  - Asset Allocation Analytics
 *  - Asset Liability Management Analytics
 *  - Capital Estimation Analytics
 *  - Exposure Analytics
 *  - Margin Analytics
 *  - XVA Analytics
 *
 *  DROP Computational Core implements libraries for the following:
 *  - Algorithm Support
 *  - Computation Support
 *  - Function Analysis
 *  - Model Validation
 *  - Numerical Analysis
 *  - Numerical Optimizer
 *  - Spline Builder
 *  - Statistical Learning
 *
 *  Documentation for DROP is Spread Over:
 *
 *  - Main                     => https://lakshmidrip.github.io/DROP/
 *  - Wiki                     => https://github.com/lakshmiDRIP/DROP/wiki
 *  - GitHub                   => https://github.com/lakshmiDRIP/DROP
 *  - Repo Layout Taxonomy     => https://github.com/lakshmiDRIP/DROP/blob/master/Taxonomy.md
 *  - Javadoc                  => https://lakshmidrip.github.io/DROP/Javadoc/index.html
 *  - Technical Specifications => https://github.com/lakshmiDRIP/DROP/tree/master/Docs/Internal
 *  - Release Versions         => https://lakshmidrip.github.io/DROP/version.html
 *  - Community Credits        => https://lakshmidrip.github.io/DROP/credits.html
 *  - Issues Catalog           => https://github.com/lakshmiDRIP/DROP/issues
 *  - JUnit                    => https://lakshmidrip.github.io/DROP/junit/index.html
 *  - Jacoco                   => https://lakshmidrip.github.io/DROP/jacoco/index.html
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *      you may not use this file except in compliance with the License.
 *
 *  You may obtain a copy of the License at
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *      distributed under the License is distributed on an "AS IS" BASIS,
 *      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 *  See the License for the specific language governing permissions and
 *      limitations under the License.
 */

/**
 * <i>MonotoneConvexHaganWest</i> implements the Monotone Convex Regime using the Hagan and West (2006)
 * Estimator. It provides the following functionality:
 *
 * <br><br>
 *  <ul>
 *      <li>
 *          Static Method to Create an instance of MonotoneConvexHaganWest
 *      </li>
 *      <li>
 *          Ensure that the estimated regime is monotone and convex
 *      </li>
 *      <li>
 *          If need be, enforce positivity and/or apply amelioration
 *      </li>
 *      <li>
 *          Apply segment-by-segment range bounds as needed
 *      </li>
 *      <li>
 *          Retrieve predictor ordinates/response values
 *      </li>
 *  </ul>
 *
 * <br><br>
 *  <ul>
 *      <li><b>Module </b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/ComputationalCore.md">Computational Core Module</a></li>
 *      <li><b>Library</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/SplineBuilderLibrary.md">Spline Builder Library</a></li>
 *      <li><b>Project</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/spline/README.md">Basis Splines and Linear Compounders across a Broad Family of Spline Basis Functions</a></li>
 *      <li><b>Package</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/spline/pchip/README.md">Monotone Convex Themed PCHIP Splines</a></li>
 *  </ul>
 * <br><br>
 *
 * @author Lakshmi Krishnamurthy
 */

public class MonotoneConvexHaganWest extends org.drip.function.definition.R1ToR1 {
    private double[] _adblObservation = null;
    private double[] _adblResponseValue = null;
    private boolean _bLinearNodeInference = true;
    private double[] _adblPredictorOrdinate = null;
    private double[] _adblResponseZScoreLeft = null;
    private double[] _adblResponseZScoreRight = null;
    private org.drip.function.definition.R1ToR1[] _aAU = null;

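    /*
     * The Case*Univariate classes below evaluate the per-segment adjustment g(x) of the Hagan-West monotone
     * convex scheme on the normalized abscissa x in [0, 1], given the segment's left/right response z-scores
     * gL = g(0) and gR = g(1). Case 1 is the basic quadratic form g(x) = gL (1 - 4x + 3x^2) + gR (-2x + 3x^2),
     * selected for the first (gL, gR) sector checked in generateUnivariate().
     */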
    class Case1Univariate extends org.drip.function.definition.R1ToR1 {
        private double _dblResponseZScoreLeft = java.lang.Double.NaN;
        private double _dblResponseZScoreRight = java.lang.Double.NaN;
        private double _dblPredictorOrdinateLeft = java.lang.Double.NaN;
        private double _dblPredictorOrdinateRight = java.lang.Double.NaN;

        Case1Univariate (
            final double dblPredictorOrdinateLeft,
            final double dblPredictorOrdinateRight,
            final double dblResponseZScoreLeft,
            final double dblResponseZScoreRight)
        {
            super (null);

            _dblResponseZScoreLeft = dblResponseZScoreLeft;
            _dblResponseZScoreRight = dblResponseZScoreRight;
            _dblPredictorOrdinateLeft = dblPredictorOrdinateLeft;
            _dblPredictorOrdinateRight = dblPredictorOrdinateRight;
        }

        @Override public double evaluate (
            final double dblPredictorOrdinate)
            throws java.lang.Exception
        {
            if (!org.drip.numerical.common.NumberUtil.IsValid (dblPredictorOrdinate) || dblPredictorOrdinate <
                _dblPredictorOrdinateLeft || dblPredictorOrdinate > _dblPredictorOrdinateRight)
                throw new java.lang.Exception ("Case1Univariate::evaluate => Invalid Inputs");

            double dblX = (dblPredictorOrdinate - _dblPredictorOrdinateLeft) / (_dblPredictorOrdinateRight -
                _dblPredictorOrdinateLeft);
            return _dblResponseZScoreLeft * (1. - 4. * dblX + 3. * dblX * dblX) + _dblResponseZScoreRight *
                (-2. * dblX + 3. * dblX * dblX);
        }

        @Override public double integrate (
            final double dblBegin,
            final double dblEnd)
            throws java.lang.Exception
        {
            return org.drip.numerical.integration.R1ToR1Integrator.Boole (this, dblBegin, dblEnd);
        }
    }

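    /*
     * Case 2: the adjustment stays flat at gL up to the breakpoint eta = (gR + 2 gL) / (gR - gL), and then
     * moves quadratically toward gR as ((x - eta) / (1 - eta))^2; when gL == gR it collapses to the constant gR.
     */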
    class Case2Univariate extends org.drip.function.definition.R1ToR1 {
        private double _dblEta = java.lang.Double.NaN;
        private double _dblResponseZScoreLeft = java.lang.Double.NaN;
        private double _dblResponseZScoreRight = java.lang.Double.NaN;
        private double _dblPredictorOrdinateLeft = java.lang.Double.NaN;
        private double _dblPredictorOrdinateRight = java.lang.Double.NaN;

        Case2Univariate (
            final double dblPredictorOrdinateLeft,
            final double dblPredictorOrdinateRight,
            final double dblResponseZScoreLeft,
            final double dblResponseZScoreRight)
        {
            super (null);

            _dblResponseZScoreLeft = dblResponseZScoreLeft;
            _dblResponseZScoreRight = dblResponseZScoreRight;
            _dblPredictorOrdinateLeft = dblPredictorOrdinateLeft;
            _dblPredictorOrdinateRight = dblPredictorOrdinateRight;
            _dblEta = _dblResponseZScoreLeft != _dblResponseZScoreRight ? (_dblResponseZScoreRight + 2. *
                _dblResponseZScoreLeft) / (_dblResponseZScoreRight - _dblResponseZScoreLeft) : 0.;
        }

        @Override public double evaluate (
            final double dblPredictorOrdinate)
            throws java.lang.Exception
        {
            if (!org.drip.numerical.common.NumberUtil.IsValid (dblPredictorOrdinate) || dblPredictorOrdinate <
                _dblPredictorOrdinateLeft || dblPredictorOrdinate > _dblPredictorOrdinateRight)
                throw new java.lang.Exception ("Case2Univariate::evaluate => Invalid Inputs");

            if (_dblResponseZScoreLeft == _dblResponseZScoreRight) return _dblResponseZScoreRight;

            double dblX = (dblPredictorOrdinate - _dblPredictorOrdinateLeft) / (_dblPredictorOrdinateRight -
                _dblPredictorOrdinateLeft);
            return dblX <= _dblEta ? _dblResponseZScoreLeft : _dblResponseZScoreLeft +
                (_dblResponseZScoreRight - _dblResponseZScoreLeft) * (dblX - _dblEta) * (dblX - _dblEta) /
                    (1. - _dblEta) / (1. - _dblEta);
        }

        @Override public double integrate (
            final double dblBegin,
            final double dblEnd)
            throws java.lang.Exception
        {
            return org.drip.numerical.integration.R1ToR1Integrator.Boole (this, dblBegin, dblEnd);
        }
    }

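    /*
     * Case 3: the adjustment varies quadratically in (eta - x) / eta below the breakpoint
     * eta = 3 gR / (gR - gL), and is held flat at gR beyond it; when gL == gR it collapses to the constant gR.
     */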
    class Case3Univariate extends org.drip.function.definition.R1ToR1 {
        private double _dblEta = java.lang.Double.NaN;
        private double _dblResponseZScoreLeft = java.lang.Double.NaN;
        private double _dblResponseZScoreRight = java.lang.Double.NaN;
        private double _dblPredictorOrdinateLeft = java.lang.Double.NaN;
        private double _dblPredictorOrdinateRight = java.lang.Double.NaN;

        Case3Univariate (
            final double dblPredictorOrdinateLeft,
            final double dblPredictorOrdinateRight,
            final double dblResponseZScoreLeft,
            final double dblResponseZScoreRight)
        {
            super (null);

            _dblResponseZScoreLeft = dblResponseZScoreLeft;
            _dblResponseZScoreRight = dblResponseZScoreRight;
            _dblPredictorOrdinateLeft = dblPredictorOrdinateLeft;
            _dblPredictorOrdinateRight = dblPredictorOrdinateRight;
            _dblEta = _dblResponseZScoreLeft != _dblResponseZScoreRight ? 3. * _dblResponseZScoreRight /
                (_dblResponseZScoreRight - _dblResponseZScoreLeft) : 0.;
        }

        @Override public double evaluate (
            final double dblPredictorOrdinate)
            throws java.lang.Exception
        {
            if (!org.drip.numerical.common.NumberUtil.IsValid (dblPredictorOrdinate) || dblPredictorOrdinate <
                _dblPredictorOrdinateLeft || dblPredictorOrdinate > _dblPredictorOrdinateRight)
                throw new java.lang.Exception ("Case3Univariate::evaluate => Invalid Inputs");

            if (_dblResponseZScoreLeft == _dblResponseZScoreRight) return _dblResponseZScoreRight;

            double dblX = (dblPredictorOrdinate - _dblPredictorOrdinateLeft) / (_dblPredictorOrdinateRight -
                _dblPredictorOrdinateLeft);
            return dblX < _dblEta ? _dblResponseZScoreLeft + (_dblResponseZScoreLeft -
                _dblResponseZScoreRight) * (_dblEta - dblX) * (_dblEta - dblX) / _dblEta / _dblEta :
                    _dblResponseZScoreRight;
        }

        @Override public double integrate (
            final double dblBegin,
            final double dblEnd)
            throws java.lang.Exception
        {
            return org.drip.numerical.integration.R1ToR1Integrator.Boole (this, dblBegin, dblEnd);
        }
    }

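    /*
     * Case 4: two quadratic arcs that meet at the interior level A = -gL gR / (gR - gL) at the breakpoint
     * eta = gR / (gR - gL), taking the value gL at x = 0 and gR at x = 1; selected in generateUnivariate()
     * when gL and gR share the same sign and no earlier case applies.
     */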
    class Case4Univariate extends org.drip.function.definition.R1ToR1 {
        private double _dblA = java.lang.Double.NaN;
        private double _dblEta = java.lang.Double.NaN;
        private double _dblResponseZScoreLeft = java.lang.Double.NaN;
        private double _dblResponseZScoreRight = java.lang.Double.NaN;
        private double _dblPredictorOrdinateLeft = java.lang.Double.NaN;
        private double _dblPredictorOrdinateRight = java.lang.Double.NaN;

        Case4Univariate (
            final double dblPredictorOrdinateLeft,
            final double dblPredictorOrdinateRight,
            final double dblResponseZScoreLeft,
            final double dblResponseZScoreRight)
        {
            super (null);

            _dblResponseZScoreLeft = dblResponseZScoreLeft;
            _dblResponseZScoreRight = dblResponseZScoreRight;
            _dblPredictorOrdinateLeft = dblPredictorOrdinateLeft;
            _dblPredictorOrdinateRight = dblPredictorOrdinateRight;

            if (_dblResponseZScoreLeft != _dblResponseZScoreRight) {
                _dblEta = _dblResponseZScoreRight / (_dblResponseZScoreRight - _dblResponseZScoreLeft);
                _dblA = -1. * _dblResponseZScoreLeft * _dblResponseZScoreRight / (_dblResponseZScoreRight -
                    _dblResponseZScoreLeft);
            } else {
                _dblA = 0.;
                _dblEta = 0.;
            }
        }

        @Override public double evaluate (
            final double dblPredictorOrdinate)
            throws java.lang.Exception
        {
            if (!org.drip.numerical.common.NumberUtil.IsValid (dblPredictorOrdinate) || dblPredictorOrdinate <
                _dblPredictorOrdinateLeft || dblPredictorOrdinate > _dblPredictorOrdinateRight)
                throw new java.lang.Exception ("Case4Univariate::evaluate => Invalid Inputs");

            if (_dblResponseZScoreLeft == _dblResponseZScoreRight) return _dblResponseZScoreRight;

            double dblX = (dblPredictorOrdinate - _dblPredictorOrdinateLeft) / (_dblPredictorOrdinateRight -
                _dblPredictorOrdinateLeft);
            return dblX < _dblEta ? _dblA + (_dblResponseZScoreLeft - _dblA) * (_dblEta - dblX) * (_dblEta -
                dblX) / _dblEta / _dblEta : _dblA + (_dblResponseZScoreRight - _dblA) * (dblX - _dblEta) *
                    (dblX - _dblEta) / (1. - _dblEta) / (1. - _dblEta);
        }

        @Override public double integrate (
            final double dblBegin,
            final double dblEnd)
            throws java.lang.Exception
        {
            return org.drip.numerical.integration.R1ToR1Integrator.Boole (this, dblBegin, dblEnd);
        }
    }

    /**
     * Create an instance of MonotoneConvexHaganWest
     *
     * @param adblPredictorOrdinate Array of Predictor Ordinates
     * @param adblObservation Array of Observations
     * @param bLinearNodeInference Apply Linear Node Inference from Observations
     *
     * @return Instance of MonotoneConvexHaganWest; null if the Inputs are Invalid or the Inference fails
     */

    public static final MonotoneConvexHaganWest Create (
        final double[] adblPredictorOrdinate,
        final double[] adblObservation,
        final boolean bLinearNodeInference)
    {
        MonotoneConvexHaganWest mchw = null;

        try {
            mchw = new MonotoneConvexHaganWest (adblPredictorOrdinate, adblObservation,
                bLinearNodeInference);
        } catch (java.lang.Exception e) {
            e.printStackTrace();

            return null;
        }

        return mchw.inferResponseValues() && mchw.inferResponseZScores() && mchw.generateUnivariate() ? mchw
            : null;
    }

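    /*
     * Usage sketch (illustrative only - the predictor ordinates and observations below are made-up sample
     * values; note that the factory expects one more predictor ordinate than observations):
     *
     *     double[] adblPredictorOrdinate = new double[] {0.0, 1.0, 2.0, 3.0};
     *     double[] adblObservation = new double[] {0.015, 0.020, 0.018};
     *
     *     MonotoneConvexHaganWest mchw = MonotoneConvexHaganWest.Create (adblPredictorOrdinate,
     *         adblObservation, true);
     *
     *     if (null != mchw) {
     *         try {
     *             double dblResponse = mchw.evaluate (1.5);
     *         } catch (java.lang.Exception e) {
     *             e.printStackTrace();
     *         }
     *     }
     */
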
    private MonotoneConvexHaganWest (
        final double[] adblPredictorOrdinate,
        final double[] adblObservation,
        final boolean bLinearNodeInference)
        throws java.lang.Exception
    {
        super (null);

        if (null == (_adblObservation = adblObservation) || null == (_adblPredictorOrdinate =
            adblPredictorOrdinate))
            throw new java.lang.Exception ("MonotoneConvexHaganWest ctr: Invalid Inputs!");

        _bLinearNodeInference = bLinearNodeInference;
        int iNumObservation = _adblObservation.length;

        if (1 >= iNumObservation || iNumObservation + 1 != _adblPredictorOrdinate.length)
            throw new java.lang.Exception ("MonotoneConvexHaganWest ctr: Invalid Inputs!");
    }

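    /*
     * Infers the response value at each interior predictor ordinate from the two adjacent segment
     * observations - either as their sub-interval-length weighted linear combination (linear node inference)
     * or via a harmonic-style weighting applied only when the adjacent observations share the same sign. The
     * two boundary response values are then extrapolated off the first/last observation.
     */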
    private boolean inferResponseValues()
    {
        int iNumPredictorOrdinate = _adblPredictorOrdinate.length;
        _adblResponseValue = new double[iNumPredictorOrdinate];

        for (int i = 1; i < iNumPredictorOrdinate - 1; ++i) {
            if (_bLinearNodeInference)
                _adblResponseValue[i] = (_adblPredictorOrdinate[i] - _adblPredictorOrdinate[i - 1]) /
                    (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i - 1]) * _adblObservation[i] +
                        (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i]) /
                            (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i - 1]) *
                                _adblObservation[i - 1];
            else {
                _adblResponseValue[i] = 0.;

                if (_adblObservation[i - 1] * _adblObservation[i] > 0.) {
                    _adblResponseValue[i] = (_adblPredictorOrdinate[i] - _adblPredictorOrdinate[i - 1] + 2. *
                        (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i])) / (3. *
                            (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i])) /
                                _adblObservation[i - 1];
                    _adblResponseValue[i] += (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i] + 2.
                        * (_adblPredictorOrdinate[i] - _adblPredictorOrdinate[i - 1])) / (3. *
                            (_adblPredictorOrdinate[i + 1] - _adblPredictorOrdinate[i])) /
                                _adblObservation[i];
                    _adblResponseValue[i] = 1. / _adblResponseValue[i];
                }
            }
        }

        _adblResponseValue[0] = _adblObservation[0] - 0.5 * (_adblResponseValue[1] - _adblObservation[0]);
        _adblResponseValue[iNumPredictorOrdinate - 1] = _adblObservation[iNumPredictorOrdinate - 2] - 0.5 *
            (_adblResponseValue[iNumPredictorOrdinate - 2] - _adblObservation[iNumPredictorOrdinate - 2]);
        return true;
    }

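    /*
     * Computes, for each segment i, the left/right response z-scores as the gap between the node response
     * values and that segment's observation: zLeft[i] = responseValue[i] - observation[i] and
     * zRight[i] = responseValue[i + 1] - observation[i].
     */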
    private boolean inferResponseZScores()
    {
        int iNumSegment = _adblPredictorOrdinate.length - 1;
        _adblResponseZScoreLeft = new double[iNumSegment];
        _adblResponseZScoreRight = new double[iNumSegment];

        for (int i = 0; i < iNumSegment; ++i) {
            _adblResponseZScoreLeft[i] = _adblResponseValue[i] - _adblObservation[i];
            _adblResponseZScoreRight[i] = _adblResponseValue[i + 1] - _adblObservation[i];
        }

        return true;
    }

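    /*
     * Assigns one of the four Case*Univariate adjustment functions to each segment according to the sector of
     * the (zLeft, zRight) plane its z-scores fall into; the checks run in order, so earlier cases take
     * precedence.
     */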
    private boolean generateUnivariate()
    {
        int iNumSegment = _adblPredictorOrdinate.length - 1;
        _aAU = new org.drip.function.definition.R1ToR1[iNumSegment];

        for (int i = 0; i < iNumSegment; ++i) {
            if ((_adblResponseZScoreLeft[i] > 0. && -0.5 * _adblResponseZScoreLeft[i] >=
                _adblResponseZScoreRight[i] && _adblResponseZScoreRight[i] >= -2. *
                    _adblResponseZScoreLeft[i]) || (_adblResponseZScoreLeft[i] < 0. && -0.5 *
                        _adblResponseZScoreLeft[i] <= _adblResponseZScoreRight[i] &&
                            _adblResponseZScoreRight[i] <= -2. * _adblResponseZScoreLeft[i]))
                _aAU[i] = new Case1Univariate (_adblPredictorOrdinate[i], _adblPredictorOrdinate[i + 1],
                    _adblResponseZScoreLeft[i], _adblResponseZScoreRight[i]);
            else if ((_adblResponseZScoreLeft[i] < 0. && _adblResponseZScoreRight[i] > -2. *
                _adblResponseZScoreLeft[i]) || (_adblResponseZScoreLeft[i] > 0. &&
                    _adblResponseZScoreRight[i] < -2. * _adblResponseZScoreLeft[i]))
                _aAU[i] = new Case2Univariate (_adblPredictorOrdinate[i], _adblPredictorOrdinate[i + 1],
                    _adblResponseZScoreLeft[i], _adblResponseZScoreRight[i]);
            else if ((_adblResponseZScoreLeft[i] > 0. && _adblResponseZScoreRight[i] > -0.5 *
                _adblResponseZScoreLeft[i]) || (_adblResponseZScoreLeft[i] < 0. &&
                    _adblResponseZScoreRight[i] < -0.5 * _adblResponseZScoreLeft[i]))
                _aAU[i] = new Case3Univariate (_adblPredictorOrdinate[i], _adblPredictorOrdinate[i + 1],
                    _adblResponseZScoreLeft[i], _adblResponseZScoreRight[i]);
            else if ((_adblResponseZScoreLeft[i] >= 0. && _adblResponseZScoreRight[i] >= 0.) ||
                (_adblResponseZScoreLeft[i] <= 0. && _adblResponseZScoreRight[i] <= 0.))
                _aAU[i] = new Case4Univariate (_adblPredictorOrdinate[i], _adblPredictorOrdinate[i + 1],
                    _adblResponseZScoreLeft[i], _adblResponseZScoreRight[i]);
        }

        return true;
    }

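    /*
     * Pulls the inferred node response values toward the supplied per-node floors/ceilings, re-anchors the
     * two boundary values when they sit more than half the corresponding gap away from the adjacent
     * observation, and then re-derives the z-scores and the per-segment univariates.
     */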
    private boolean ameliorate (
        final double[] adblResponseLeftMin,
        final double[] adblResponseLeftMax,
        final double[] adblResponseRightMin,
        final double[] adblResponseRightMax)
    {
        int iNumObservation = _adblObservation.length;

        if (iNumObservation != adblResponseLeftMin.length || iNumObservation != adblResponseLeftMax.length ||
            iNumObservation != adblResponseRightMin.length || iNumObservation != adblResponseRightMax.length)
            return false;

        for (int i = 0; i < iNumObservation; ++i) {
            if (_adblResponseValue[i] < java.lang.Math.max (adblResponseLeftMin[i], adblResponseRightMin[i])
                || _adblResponseValue[i] > java.lang.Math.min (adblResponseLeftMax[i],
                    adblResponseRightMax[i])) {
                if (_adblResponseValue[i] < java.lang.Math.max (adblResponseLeftMin[i],
                    adblResponseRightMin[i]))
                    _adblResponseValue[i] = java.lang.Math.max (adblResponseLeftMin[i],
                        adblResponseRightMin[i]);
                else if (_adblResponseValue[i] > java.lang.Math.min (adblResponseLeftMax[i],
                    adblResponseRightMax[i]))
                    _adblResponseValue[i] = java.lang.Math.min (adblResponseLeftMax[i],
                        adblResponseRightMax[i]);
            } else {
                if (_adblResponseValue[i] < java.lang.Math.min (adblResponseLeftMax[i],
                    adblResponseRightMax[i]))
                    _adblResponseValue[i] = java.lang.Math.min (adblResponseLeftMax[i],
                        adblResponseRightMax[i]);
                else if (_adblResponseValue[i] > java.lang.Math.max (adblResponseLeftMin[i],
                    adblResponseRightMin[i]))
                    _adblResponseValue[i] = java.lang.Math.max (adblResponseLeftMin[i],
                        adblResponseRightMin[i]);
            }
        }

        if (java.lang.Math.abs (_adblResponseValue[0] - _adblObservation[0]) > 0.5 * java.lang.Math.abs
            (_adblResponseValue[1] - _adblObservation[0]))
            _adblResponseValue[0] = _adblObservation[1] - 0.5 * (_adblResponseValue[1] -
                _adblObservation[0]);

        if (java.lang.Math.abs (_adblResponseValue[iNumObservation] - _adblObservation[iNumObservation - 1])
            > 0.5 * java.lang.Math.abs (_adblResponseValue[iNumObservation - 1] -
                _adblObservation[iNumObservation - 1]))
            _adblResponseValue[iNumObservation] = _adblObservation[iNumObservation - 1] - 0.5 *
                (_adblObservation[iNumObservation - 1] - _adblResponseValue[iNumObservation - 1]);

        return inferResponseZScores() && generateUnivariate();
    }

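    /*
     * Locates the index of the segment whose predictor ordinate span contains the given ordinate; the flags
     * control whether the left/right segment edges count as containing.
     */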
    private int containingIndex (
        final double dblPredictorOrdinate,
        final boolean bIncludeLeft,
        final boolean bIncludeRight)
        throws java.lang.Exception
    {
        int iNumSegment = _aAU.length;

        for (int i = 0; i < iNumSegment; ++i) {
            boolean bLeftValid = bIncludeLeft ? _adblPredictorOrdinate[i] <= dblPredictorOrdinate :
                _adblPredictorOrdinate[i] < dblPredictorOrdinate;

            boolean bRightValid = bIncludeRight ? _adblPredictorOrdinate[i + 1] >= dblPredictorOrdinate :
                _adblPredictorOrdinate[i + 1] > dblPredictorOrdinate;

            if (bLeftValid && bRightValid) return i;
        }

        throw new java.lang.Exception
            ("MonotoneConvexHaganWest::containingIndex => Cannot locate Containing Index");
    }

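    /*
     * The interpolated response at a predictor ordinate is the containing segment's observation plus that
     * segment's univariate adjustment evaluated at the ordinate.
     */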
    @Override public double evaluate (
        final double dblPredictorOrdinate)
        throws java.lang.Exception
    {
        int iContainingIndex = containingIndex (dblPredictorOrdinate, true, true);

        return _aAU[iContainingIndex].evaluate (dblPredictorOrdinate) + _adblObservation[iContainingIndex];
    }

    /**
     * Enforce the Positivity of the Inferred Response Values
     *
     * @return TRUE - Positivity Enforcement is successful
     */

    public boolean enforcePositivity()
    {
        try {
            _adblResponseValue[0] = org.drip.numerical.common.NumberUtil.Bound (_adblResponseValue[0], 0., 2. *
                _adblObservation[0]);

            int iNumObservation = _adblObservation.length;

            for (int i = 1; i < iNumObservation; ++i)
                _adblResponseValue[i] = org.drip.numerical.common.NumberUtil.Bound (_adblResponseValue[i], 0., 2.
                    * java.lang.Math.min (_adblObservation[i - 1], _adblObservation[i]));

            _adblResponseValue[iNumObservation] = org.drip.numerical.common.NumberUtil.Bound
                (_adblResponseValue[iNumObservation], 0., 2. * _adblObservation[iNumObservation - 1]);

            return inferResponseZScores() && generateUnivariate();
        } catch (java.lang.Exception e) {
            e.printStackTrace();
        }

        return false;
    }

    /**
     * Create an Ameliorated Instance of the Current Instance
     *
     * @param adblResponseLeftMin Response Left Floor
     * @param adblResponseLeftMax Response Left Ceiling
     * @param adblResponseRightMin Response Right Floor
     * @param adblResponseRightMax Response Right Ceiling
     * @param bEnforcePositivity TRUE - Enforce Positivity
     *
     * @return The Ameliorated Version of the Current Instance
     */

    public MonotoneConvexHaganWest generateAmelioratedInstance (
        final double[] adblResponseLeftMin,
        final double[] adblResponseLeftMax,
        final double[] adblResponseRightMin,
        final double[] adblResponseRightMax,
        final boolean bEnforcePositivity)
    {
        if (null == adblResponseLeftMin || null == adblResponseLeftMax || null == adblResponseRightMin ||
            null == adblResponseRightMax)
            return null;

        int iNumAmelioratedObservation = _adblObservation.length + 2;
        int iNumAmelioratedPredicatorOrdinate = _adblPredictorOrdinate.length + 2;
        double[] adblAmelioratedObservation = new double[iNumAmelioratedObservation];
        double[] adblAmelioratedPredictorOrdinate = new double[iNumAmelioratedPredicatorOrdinate];

        for (int i = 0; i < iNumAmelioratedPredicatorOrdinate; ++i) {
            if (0 == i)
                adblAmelioratedPredictorOrdinate[0] = -1. * _adblPredictorOrdinate[1];
            else if (iNumAmelioratedPredicatorOrdinate - 1 == i)
                adblAmelioratedPredictorOrdinate[i] = 2. * _adblPredictorOrdinate[i - 1] -
                    _adblPredictorOrdinate[i - 2];
            else
                adblAmelioratedPredictorOrdinate[i] = _adblPredictorOrdinate[i - 1];
        }

        for (int i = 0; i < iNumAmelioratedObservation; ++i) {
            if (0 == i)
                adblAmelioratedObservation[0] = _adblObservation[0] - (_adblPredictorOrdinate[1] -
                    _adblPredictorOrdinate[0]) * (_adblObservation[1] - _adblObservation[0]) /
                        (_adblPredictorOrdinate[2] - _adblPredictorOrdinate[0]);
            else if (iNumAmelioratedPredicatorOrdinate - 1 == i)
                adblAmelioratedObservation[i] = _adblObservation[i - 1] - (_adblPredictorOrdinate[i - 1] -
                    _adblPredictorOrdinate[i - 2]) * (_adblObservation[i - 1] - _adblObservation[i - 2]) /
                        (_adblPredictorOrdinate[i - 1] - _adblPredictorOrdinate[i - 3]);
            else
                adblAmelioratedObservation[i] = _adblObservation[i - 1];
        }

        MonotoneConvexHaganWest mchwAmeliorated = Create (adblAmelioratedPredictorOrdinate,
            adblAmelioratedObservation, _bLinearNodeInference);

        if (null == mchwAmeliorated || !mchwAmeliorated.ameliorate (adblResponseLeftMin,
            adblResponseLeftMax, adblResponseRightMin, adblResponseRightMax))
            return null;

        if (bEnforcePositivity) {
            if (!mchwAmeliorated.enforcePositivity()) return null;
        }

        return mchwAmeliorated;
    }

    /**
     * Retrieve the Array of Predictor Ordinates
     *
     * @return The Array of Predictor Ordinates
     */

    public double[] predictorOrdinates()
    {
        return _adblPredictorOrdinate;
    }

    /**
     * Retrieve the Array of Response Values
     *
     * @return The Array of Response Values
     */

    public double[] responseValues()
    {
        return _adblResponseValue;
    }
}