LpLossLearner.java

package org.drip.learning.rxtor1;

/*
 * -*- mode: java; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 */

/*!
 * Copyright (C) 2020 Lakshmi Krishnamurthy
 * Copyright (C) 2019 Lakshmi Krishnamurthy
 * Copyright (C) 2018 Lakshmi Krishnamurthy
 * Copyright (C) 2017 Lakshmi Krishnamurthy
 * Copyright (C) 2016 Lakshmi Krishnamurthy
 * Copyright (C) 2015 Lakshmi Krishnamurthy
 *
 *  This file is part of DROP, an open-source library targeting analytics/risk, transaction cost analytics,
 *      asset liability management analytics, capital, exposure, and margin analytics, valuation adjustment
 *      analytics, and portfolio construction analytics within and across fixed income, credit, commodity,
 *      equity, FX, and structured products. It also includes auxiliary libraries for algorithm support,
 *      numerical analysis, numerical optimization, spline builder, model validation, statistical learning,
 *      and computational support.
 *
 *      https://lakshmidrip.github.io/DROP/
 *
 *  DROP is composed of three modules:
 *
 *  - DROP Product Core - https://lakshmidrip.github.io/DROP-Product-Core/
 *  - DROP Portfolio Core - https://lakshmidrip.github.io/DROP-Portfolio-Core/
 *  - DROP Computational Core - https://lakshmidrip.github.io/DROP-Computational-Core/
 *
 *  DROP Product Core implements libraries for the following:
 *  - Fixed Income Analytics
 *  - Loan Analytics
 *  - Transaction Cost Analytics
 *
 *  DROP Portfolio Core implements libraries for the following:
 *  - Asset Allocation Analytics
 *  - Asset Liability Management Analytics
 *  - Capital Estimation Analytics
 *  - Exposure Analytics
 *  - Margin Analytics
 *  - XVA Analytics
 *
 *  DROP Computational Core implements libraries for the following:
 *  - Algorithm Support
 *  - Computation Support
 *  - Function Analysis
 *  - Model Validation
 *  - Numerical Analysis
 *  - Numerical Optimizer
 *  - Spline Builder
 *  - Statistical Learning
 *
 *  Documentation for DROP is Spread Over:
 *
 *  - Main                     => https://lakshmidrip.github.io/DROP/
 *  - Wiki                     => https://github.com/lakshmiDRIP/DROP/wiki
 *  - GitHub                   => https://github.com/lakshmiDRIP/DROP
 *  - Repo Layout Taxonomy     => https://github.com/lakshmiDRIP/DROP/blob/master/Taxonomy.md
 *  - Javadoc                  => https://lakshmidrip.github.io/DROP/Javadoc/index.html
 *  - Technical Specifications => https://github.com/lakshmiDRIP/DROP/tree/master/Docs/Internal
 *  - Release Versions         => https://lakshmidrip.github.io/DROP/version.html
 *  - Community Credits        => https://lakshmidrip.github.io/DROP/credits.html
 *  - Issues Catalog           => https://github.com/lakshmiDRIP/DROP/issues
 *  - JUnit                    => https://lakshmidrip.github.io/DROP/junit/index.html
 *  - Jacoco                   => https://lakshmidrip.github.io/DROP/jacoco/index.html
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *      you may not use this file except in compliance with the License.
 *
 *  You may obtain a copy of the License at
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *      distributed under the License is distributed on an "AS IS" BASIS,
 *      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 *  See the License for the specific language governing permissions and
 *      limitations under the License.
 */

/**
 * <i>LpLossLearner</i> implements the Learner Class that holds the Space of Normed R<sup>x</sup> To Normed
 * R<sup>1</sup> Learning Functions for the Family of Loss Functions that are Polynomial, i.e.,
 *
 *              loss (eta) = (eta ^ p) / p,  for p greater than 1.
 *
 * This is Lipschitz, with a Lipschitz Slope of
 *
 *              C = (b - a) ^ (p - 1)
 *
 * <br><br>
 * The References are:
 *
 * <br><br>
 * <ul>
 *  <li>
 *      Alon, N., S. Ben-David, N. Cesa-Bianchi, and D. Haussler (1997): Scale-sensitive Dimensions, Uniform
 *          Convergence, and Learnability <i>Journal of the Association for Computing Machinery</i> <b>44
 *          (4)</b> 615-631
 *  </li>
 *  <li>
 *      Anthony, M., and P. L. Bartlett (1999): <i>Artificial Neural Network Learning - Theoretical
 *          Foundations</i> <b>Cambridge University Press</b> Cambridge, UK
 *  </li>
 *  <li>
 *      Kearns, M. J., R. E. Schapire, and L. M. Sellie (1994): Towards Efficient Agnostic Learning
 *          <i>Machine Learning</i> <b>17 (2)</b> 115-141
 *  </li>
 *  <li>
 *      Lee, W. S., P. L. Bartlett, and R. C. Williamson (1998): The Importance of Convexity in Learning with
 *          Squared Loss <i>IEEE Transactions on Information Theory</i> <b>44</b> 1974-1980
 *  </li>
 *  <li>
 *      Vapnik, V. N. (1998): <i>Statistical Learning Theory</i> <b>Wiley</b> New York
 *  </li>
 * </ul>
 *
 *  <br><br>
 *  <ul>
 *      <li><b>Module </b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/ComputationalCore.md">Computational Core Module</a></li>
 *      <li><b>Library</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/StatisticalLearningLibrary.md">Statistical Learning</a></li>
 *      <li><b>Project</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/learning">Agnostic Learning Bounds under Empirical Loss Minimization Schemes</a></li>
 *      <li><b>Package</b> = <a href = "https://github.com/lakshmiDRIP/DROP/tree/master/src/main/java/org/drip/learning/rxtor1">Statistical Learning Empirical Loss Penalizer</a></li>
 *  </ul>
 *
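 * <br><br>
 * Illustrative usage sketch: the finite function class, the covering number based deviation bound, the
 * regularizer, the candidate learner, and the validated predictor/response instances (funcClassRxToR1,
 * cdpb, regularizerFunc, learnerR1ToR1, gvviX, gvviY below) are assumed to be constructed elsewhere; only
 * the calls specific to <i>LpLossLearner</i> are shown.
 *
 * <pre>
 *     LpLossLearner lossLearner = new LpLossLearner (
 *         funcClassRxToR1,   // Normed R^x To Normed R^1 Finite Function Class
 *         cdpb,              // Covering Number Deviation Upper Probability Bound Generator
 *         regularizerFunc,   // Regularization Function
 *         2.                 // Loss Exponent p = 2 => Squared Loss eta^2 / 2
 *     );
 *
 *     double p = lossLearner.lossExponent();          // 2.
 *
 *     double slope = lossLearner.lipschitzSlope();    // (b - a) ^ (p - 1) over the Input Space
 *
 *     double loss = lossLearner.empiricalLoss (learnerR1ToR1, gvviX, gvviY);
 * </pre>
 *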
 * @author Lakshmi Krishnamurthy
 */

public class LpLossLearner extends org.drip.learning.rxtor1.GeneralizedLearner {
    private double _dblLossExponent = java.lang.Double.NaN;

    /**
     * LpLossLearner Constructor
     *
     * @param funcClassRxToR1 R^x To R^1 Function Class
     * @param cdpb The Covering Number based Deviation Upper Probability Bound Generator
     * @param regularizerFunc The Regularizer Function
     * @param dblLossExponent The Loss Exponent (no smaller than 1)
     *
     * @throws java.lang.Exception Thrown if the Inputs are Invalid
     */

    public LpLossLearner (
        final org.drip.spaces.functionclass.NormedRxToNormedR1Finite funcClassRxToR1,
        final org.drip.learning.bound.CoveringNumberLossBound cdpb,
        final org.drip.learning.regularization.RegularizationFunction regularizerFunc,
        final double dblLossExponent)
        throws java.lang.Exception
    {
        super (funcClassRxToR1, cdpb, regularizerFunc);

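        // The Loss Exponent p must be a valid number with p >= 1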
        if (!org.drip.numerical.common.NumberUtil.IsValid (_dblLossExponent = dblLossExponent) || 1. >
            _dblLossExponent)
            throw new java.lang.Exception ("LpLossLearner ctr: Invalid Inputs");
    }

    /**
     * Retrieve the Loss Exponent
     *
     * @return The Loss Exponent
     */

    public double lossExponent()
    {
        return _dblLossExponent;
    }

    /**
     * Retrieve the Lipschitz Slope Bound
     *
     * @return The Lipschitz Slope Bound
     */

    public double lipschitzSlope()
    {
        org.drip.spaces.metric.GeneralizedMetricVectorSpace gmvsInput =
            functionClass().inputMetricVectorSpace();

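        // Lipschitz Slope C = (b - a) ^ (p - 1) over the Bounded Input Space [a, b]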
        return java.lang.Math.pow (gmvsInput.rightEdge() - gmvsInput.leftEdge(), _dblLossExponent - 1.);
    }

    @Override public double lossSampleCoveringNumber (
        final org.drip.spaces.instance.GeneralizedValidatedVector gvvi,
        final double dblEpsilon,
        final boolean bSupremum)
        throws java.lang.Exception
    {
        if (null == gvvi || !org.drip.numerical.common.NumberUtil.IsValid (dblEpsilon) || 0. >= dblEpsilon)
            throw new java.lang.Exception ("LpLossLearner::lossSampleCoveringNumber => Invalid Inputs");

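        // Map the Loss-Level Cover dblEpsilon to the Function-Level Cover using the Lipschitz Slope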
        double dblLipschitzCover = dblEpsilon / lipschitzSlope();

        org.drip.spaces.functionclass.NormedRxToNormedR1Finite funcClassRxToR1 = functionClass();

        org.drip.learning.bound.LipschitzCoveringNumberBound llcn = new
            org.drip.learning.bound.LipschitzCoveringNumberBound
                (funcClassRxToR1.sampleSupremumCoveringNumber (gvvi, dblLipschitzCover),
                    funcClassRxToR1.sampleCoveringNumber (gvvi, gvvi.sampleSize() * dblLipschitzCover));

        return bSupremum ? llcn.supremumUpperBound() : llcn.lpUpperBound();
    }

    @Override public double empiricalLoss (
        final org.drip.function.definition.R1ToR1 funcLearnerR1ToR1,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviX,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviY)
        throws java.lang.Exception
    {
        if (null == funcLearnerR1ToR1 || null == gvviX || !(gvviX instanceof
            org.drip.spaces.instance.ValidatedR1) || null == gvviY || !(gvviY instanceof
                org.drip.spaces.instance.ValidatedR1))
            throw new java.lang.Exception ("LpLossLearner::empiricalLoss => Invalid Inputs");

        double[] adblX = ((org.drip.spaces.instance.ValidatedR1) gvviX).instance();

        double[] adblY = ((org.drip.spaces.instance.ValidatedR1) gvviY).instance();

        double dblEmpiricalLoss = 0.;
        int iNumSample = adblX.length;

        if (iNumSample != adblY.length)
            throw new java.lang.Exception ("LpLossLearner::empiricalLoss => Invalid Inputs");

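        // Empirical L_p Loss: (1 / p) * Sum_i |f (x_i) - y_i| ^ p over the R^1 Sample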
        for (int i = 0; i < iNumSample; ++i)
            dblEmpiricalLoss += java.lang.Math.pow (java.lang.Math.abs (funcLearnerR1ToR1.evaluate (adblX[i])
                - adblY[i]), _dblLossExponent);

        return dblEmpiricalLoss / _dblLossExponent;
    }

    @Override public double empiricalLoss (
        final org.drip.function.definition.RdToR1 funcLearnerRdToR1,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviX,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviY)
        throws java.lang.Exception
    {
        if (null == funcLearnerRdToR1 || null == gvviX || !(gvviX instanceof
            org.drip.spaces.instance.ValidatedRd) || null == gvviY || !(gvviY instanceof
                org.drip.spaces.instance.ValidatedR1))
            throw new java.lang.Exception ("LpLossLearner::empiricalLoss => Invalid Inputs");

        double[][] aadblX = ((org.drip.spaces.instance.ValidatedRd) gvviX).instance();

        double[] adblY = ((org.drip.spaces.instance.ValidatedR1) gvviY).instance();

        double dblEmpiricalLoss = 0.;
        int iNumSample = aadblX.length;

        if (iNumSample != adblY.length)
            throw new java.lang.Exception ("LpLossLearner::empiricalLoss => Invalid Inputs");

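        // Empirical L_p Loss over the R^d Predictor Sample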
        for (int i = 0; i < iNumSample; ++i)
            dblEmpiricalLoss += java.lang.Math.pow (java.lang.Math.abs (funcLearnerRdToR1.evaluate
                (aadblX[i]) - adblY[i]), _dblLossExponent);

        return dblEmpiricalLoss / _dblLossExponent;
    }

    @Override public double empiricalRisk (
        final org.drip.measure.continuous.R1R1 distR1R1,
        final org.drip.function.definition.R1ToR1 funcLearnerR1ToR1,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviX,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviY)
        throws java.lang.Exception
    {
        if (null == distR1R1 || null == funcLearnerR1ToR1 || null == gvviX || !(gvviX instanceof
            org.drip.spaces.instance.ValidatedR1) || null == gvviY || !(gvviY instanceof
                org.drip.spaces.instance.ValidatedR1))
            throw new java.lang.Exception ("LpLossLearner::empiricalRisk => Invalid Inputs");

        double[] adblX = ((org.drip.spaces.instance.ValidatedR1) gvviX).instance();

        double[] adblY = ((org.drip.spaces.instance.ValidatedR1) gvviY).instance();

        double dblNormalizer = 0.;
        double dblEmpiricalLoss = 0.;
        int iNumSample = adblX.length;

        if (iNumSample != adblY.length)
            throw new java.lang.Exception ("LpLossLearner::empiricalRisk => Invalid Inputs");

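        // Density-weighted Empirical L_p Loss, normalized by the Aggregate Density over the Sample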
        for (int i = 0; i < iNumSample; ++i) {
            double dblDensity = distR1R1.density (adblX[i], adblY[i]);

            dblNormalizer += dblDensity;

            dblEmpiricalLoss += dblDensity * java.lang.Math.pow (java.lang.Math.abs
                (funcLearnerR1ToR1.evaluate (adblX[i]) - adblY[i]), _dblLossExponent);
        }

        return dblEmpiricalLoss / _dblLossExponent / dblNormalizer;
    }

    @Override public double empiricalRisk (
        final org.drip.measure.continuous.RdR1 distRdR1,
        final org.drip.function.definition.RdToR1 funcLearnerRdToR1,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviX,
        final org.drip.spaces.instance.GeneralizedValidatedVector gvviY)
        throws java.lang.Exception
    {
        if (null == distRdR1 || null == funcLearnerRdToR1 || null == gvviX || !(gvviX instanceof
            org.drip.spaces.instance.ValidatedRd) || null == gvviY || !(gvviY instanceof
                org.drip.spaces.instance.ValidatedR1))
            throw new java.lang.Exception ("LpLossLearner::empiricalRisk => Invalid Inputs");

        double[][] aadblX = ((org.drip.spaces.instance.ValidatedRd) gvviX).instance();

        double[] adblY = ((org.drip.spaces.instance.ValidatedR1) gvviY).instance();

        double dblNormalizer = 0.;
        double dblEmpiricalLoss = 0.;
        int iNumSample = aadblX.length;

        if (iNumSample != adblY.length)
            throw new java.lang.Exception ("LpLossLearner::empiricalRisk => Invalid Inputs");

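        // Density-weighted Empirical L_p Loss over the R^d Predictor Sample, normalized by the Aggregate Density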
        for (int i = 0; i < iNumSample; ++i) {
            double dblDensity = distRdR1.density (aadblX[i], adblY[i]);

            dblNormalizer += dblDensity;

            dblEmpiricalLoss += dblDensity * java.lang.Math.pow (java.lang.Math.abs
                (funcLearnerRdToR1.evaluate (aadblX[i]) - adblY[i]), _dblLossExponent);
        }

        return dblEmpiricalLoss / _dblLossExponent / dblNormalizer;
    }
}
  279. }