LCOV - code coverage report
Current view: top level - src - mo_kernel.f90 (source / functions)
Test: forces coverage
Date: 2024-03-13 19:03:28
                 Hit     Total   Coverage
Lines:           330      1086     30.4 %
Functions:        16        39     41.0 %

          Line data    Source code
       1             : !> \file mo_kernel.f90
       2             : !> \brief \copybrief mo_kernel
       3             : !> \details \copydetails mo_kernel
       4             : 
       5             : !> \brief   Module for kernel regression and kernel density estimation.
       6             : !> \details This module provides routines for kernel regression of data as well as kernel density
       7             : !!          estimation of both probability density functions (PDF) and cumulative density functions (CDF).\n
       8             : !!          So far only a Gaussian kernel is implemented (Nadaraya-Watson)
       9             : !!          which can be used for one- and multidimensional data.\n
       10             : !>          Furthermore, the estimation of the bandwidth needed for the kernel methods is provided
       11             : !!          by either Silverman's rule of thumb or a Cross-Validation approach.\n
       12             : !!          The Cross-Validation method is actually an optimization of the bandwidth and
       13             : !!          might be the most costly part of the kernel smoother.
       14             : !!          Therefore, the bandwidth estimation is not necessarily part of the kernel smoothing
      15             : !!          but can be determined first and given as an optional argument to the smoother.
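                      : !!
                      : !!          A minimal usage sketch (the numbers below are purely illustrative; see the interface
                      : !!          descriptions further down for all optional arguments):
                      : !!          \code{.f90}
                      : !!          use mo_kind,   only: dp
                      : !!          use mo_kernel, only: kernel_density_h, kernel_density, kernel_cumdensity
                      : !!          real(dp) :: x(5), pdf(5), cdf(5), h
                      : !!          x   = (/ 26.1_dp, 24.5_dp, 24.8_dp, 24.5_dp, 24.1_dp /)
                      : !!          h   = kernel_density_h(x, silverman=.true.)  ! estimate the bandwidth once
                      : !!          pdf = kernel_density(x, h=h)                 ! reuse it for the PDF ...
                      : !!          cdf = kernel_cumdensity(x, h=h)              ! ... and for the CDF
                      : !!          \endcode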
      16             : !> \changelog
      17             : !! - Juliane Mai, Mar 2013
      18             : !! - Stephan Thober, Mar 2013
      19             : !! - Matthias Cuntz, Mar 2013
      20             : !! - Matthias Cuntz, May 2013
      21             : !!   - sort -> qsort
      22             : !!   - module procedure golden
      23             : !! - Stephan Thober, Jul 2015
      24             : !!   - using sort_index in favor of qsort_index
      25             : !! - Matthias Cuntz, Mar 2016
      26             : !!   - Romberg integration in cumdensity
      27             : !> - Juliane Mai
      28             : !> \date Mar 2013
      29             : !> \copyright Copyright 2005-\today, the CHS Developers, Sabine Attinger: All rights reserved.
      30             : !! FORCES is released under the LGPLv3+ license \license_note
      31             : MODULE mo_kernel
      32             : 
      33             :   !$ USE omp_lib
      34             :   USE mo_kind,      ONLY: i4, sp, dp
      35             :   USE mo_moment,    ONLY: stddev
      36             : 
      37             :   IMPLICIT NONE
      38             : 
      39             :   PUBLIC :: kernel_cumdensity        ! Kernel smoothing of a CDF                 (only 1D)
      40             :   PUBLIC :: kernel_density           ! Kernel smoothing of a PDF                 (only 1D)
       41             :   PUBLIC :: kernel_density_h         ! Bandwidth estimation for PDF and CDF      (only 1D)
      42             :   PUBLIC :: kernel_regression        ! Kernel regression                         (1D and ND)
       43             :   PUBLIC :: kernel_regression_h      ! Bandwidth estimation for kernel regression (1D and ND)
      44             : 
      45             : 
      46             :   ! ------------------------------------------------------------------
      47             :   !>        \brief   Approximates the cumulative density function (CDF).
      48             :   !>        \details Approximates the cumulative density function (CDF) to a given 1D data set using a Gaussian kernel.
      49             :   !!
       50             :   !!        The bandwidth of the kernel can be pre-determined using the function kernel_density_h and
       51             :   !!        specified by the optional argument h.
       52             :   !!        If h is not given, the default method to approximate the bandwidth h is Silverman's rule of thumb
       53             :   !!               \f[ h = \left(\frac{4}{3}\right)^{0.2} n^{-0.2} \sigma_x \f]
       54             :   !!        where n is the number of given data points and \f$ \sigma_x \f$ is the standard deviation of the data.\n
       55             :   !!        If the optional argument silverman is set to false, the cross-validation method described
       56             :   !!        by Scott et al. (2005) is applied.
       57             :   !!        In that case, the likelihood of a given h is maximized using the Nelder-Mead algorithm nelminrange.
       58             :   !!        For large data sets this might be time consuming and should be done beforehand using the
       59             :   !!        function kernel_density_h.\n
       60             :   !!        The dataset x can be single or double precision. The result will have the same numerical precision.\n
       61             :   !!        If the CDF is needed at points other than x, the optional argument xout has to be specified.
       62             :   !!        The result will then be of the same size and precision as xout.
      63             :   !!
      64             :   !!        \b Example
      65             :   !!        \code{.f90}
      66             :   !!        ! given data, e.g. temperature
      67             :   !!        x = (/ 26.1_dp, 24.5_dp, 24.8_dp, 24.5_dp, 24.1_dp /)
      68             :   !!
      69             :   !!        ! estimate bandwidth via cross-validation (time-consuming)
      70             :   !!        h = kernel_density_h(x,silverman=.false.)
      71             :   !!
       72             :   !!        ! estimate bandwidth with Silverman's rule of thumb (default)
      73             :   !!        h = kernel_density_h(x,silverman=.true.)
      74             :   !!
      75             :   !!        ! calc cumulative density with the estimated bandwidth h at given output points xout
      76             :   !!        cdf = kernel_cumdensity(x, h=h, xout=xout)
      77             :   !!        ! gives cumulative density at xout values, if specified, or at x values, if xout is not present
       78             :   !!        ! if bandwidth h is given                : silverman (true/false) is ignored
       79             :   !!        ! if silverman=.true.  and h not present : bandwidth will be estimated using Silverman's rule of thumb
       80             :   !!        ! if silverman=.false. and h not present : bandwidth will be estimated using the Cross-Validation approach
      81             :   !!        \endcode
      82             :   !!
      83             :   !!        -> see also example in test directory
      84             :   !!
      85             :   !!        \b Literature
      86             :   !!        1. Scott, D. W., & Sain, S. R. (2005).
      87             :   !!             Multi-dimensional Density Estimation. Handbook of Statistics, 24, 229-261.
      88             :   !!             doi:10.1016/S0169-7161(04)24009-3
      89             :   !!        2. Haerdle, W., & Mueller, M. (2000). Multivariate and semiparametric kernel regression.
      90             :   !!            In M. G. Schimek (Ed.), Smoothing and regression: Approaches, computation, and
      91             :   !!             application (pp. 357-392). Hoboken, NJ, USA: John Wiley & Sons, Inc. doi:10.1002/9781118150658.ch12s,
      92             :   !!             Cambridge University Press, UK, 1996
      93             :   !!
      94             :   !>       \param[in] "real(sp/dp) :: x(:)"                 \f$ x_i \f$ 1D-array with data points
       95             :   !>       \param[in] "real(sp/dp), optional :: h"          Bandwidth of the kernel.\n
       96             :   !!                                                        If present, argument silverman is ignored.
       97             :   !!                                                        If not present, the bandwidth will be approximated first.
       98             :   !>       \param[in] "logical, optional :: silverman"      By default Silverman's rule of thumb will be used to approximate
       99             :   !!                                                        the bandwidth of the kernel (silverman=true).
      100             :   !!                                                        If silverman=false the Cross-Validation approach is used
      101             :   !!                                                        to estimate the bandwidth.
      102             :   !>       \param[in] "real(sp/dp), optional :: xout(:)"    If present, the CDF will be approximated at these points,
      103             :   !!                                                        otherwise the CDF is approximated at x.
      104             :   !>       \param[in] "logical, optional :: romberg"        If .true., use converging Romberg integration;
      105             :   !!                                                        if .false., use 5-point Newton-Cotes with a fixed number of points (see the sketch below).
      106             :   !>       \param[in] "integer(i4), optional :: nintegrate" 2**nintegrate sampling points if Romberg, otherwise nintegrate
      107             :   !!                                                        sampling points for the integration between output points.
      108             :   !!                                                        Default: 10 if Romberg, 101 otherwise.
      109             :   !>       \param[in] "real(sp/dp), optional :: epsint"     Maximum relative error for Romberg integration.
      110             :   !!                                                        Default: 1e-6.
      111             :   !>       \param[in] "logical, optional :: mask(:)"        Mask of x values to use in the calculation.\n
      112             :   !!                                                        If xout is not given, masked entries get the nodata value.
      113             :   !>       \param[in] "real(sp/dp), optional :: nodata"     If mask is given but xout is not, masked data points
      114             :   !!                                                        get nodata as their kernel estimate.
     115             :   !>       \return    real(sp/dp), allocatable :: kernel_cumdensity(:) — smoothed CDF at either x or xout
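                      :   !!
                      :   !!       A short sketch of the integration options (argument values below are illustrative only;
                      :   !!       x and h as in the example above, xout any output points):
                      :   !!       \code{.f90}
                      :   !!       ! converging Romberg integration, at most 2**12 points per interval, relative error 1e-8
                      :   !!       cdf = kernel_cumdensity(x, h=h, xout=xout, romberg=.true., nintegrate=12, epsint=1.0e-8_dp)
                      :   !!
                      :   !!       ! fixed 5-point Newton-Cotes on 201 sampling points between consecutive output points
                      :   !!       cdf = kernel_cumdensity(x, h=h, xout=xout, romberg=.false., nintegrate=201)
                      :   !!       \endcode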
     116             : 
     117             :   !>       \note Data need to be one-dimensional. Multi-dimensional data handling not implemented yet.
     118             : 
     119             :   !>       \author Juliane Mai
     120             :   !>       \date Mar 2013
     121             :   !>       \author Matthias Cuntz
     122             :   !>       \date Mar 2013
     123             :   !>       \date May 2013
     124             :   !!             - sort -> qsort
     125             :   !>       \author Matthias Cuntz
     126             :   !>       \date Mar 2016
     127             :   !!             - Romberg integration in cumdensity
     128             :   INTERFACE kernel_cumdensity
     129             :      MODULE PROCEDURE kernel_cumdensity_1d_dp, kernel_cumdensity_1d_sp
     130             :   END INTERFACE kernel_cumdensity
     131             : 
     132             : 
     133             :   ! ------------------------------------------------------------------
     134             :   !>        \brief   Approximates the probability density function (PDF).
     135             :   !>        \details Approximates the probability density function (PDF) to a given 1D data set using a Gaussian kernel.
     136             :   !!
      137             :   !!        The bandwidth of the kernel can be pre-determined using the function kernel_density_h and specified
      138             :   !!        by the optional argument h.
      139             :   !!        If h is not given, the default method to approximate the bandwidth h is Silverman's rule of thumb
      140             :   !!               \f[ h = \left(\frac{4}{3}\right)^{0.2} n^{-0.2} \sigma_x \f]
      141             :   !!        where n is the number of given data points and \f$ \sigma_x \f$ is the standard deviation of the data.\n
      142             :   !!        If the optional argument silverman is set to false, the cross-validation method described
      143             :   !!        by Scott et al. (2005) is applied.
      144             :   !!        In that case, the likelihood of a given h is maximized using the Nelder-Mead algorithm nelminrange.
      145             :   !!        For large data sets this might be time consuming and should be done beforehand using the function
      146             :   !!        kernel_density_h.\n
      147             :   !!        The dataset x can be single or double precision. The result will have the same numerical precision.\n
      148             :   !!        If the PDF is needed at points other than x, the optional argument xout has to be specified.
      149             :   !!        The result will then be of the same size and precision as xout.
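                      :   !!
                      :   !!        For reference, a Gaussian kernel density estimate of this kind has the usual textbook form
                      :   !!               \f[ \hat{p}(x) = \frac{1}{n\,h} \sum_{i=1}^{n}
                      :   !!                   \frac{1}{\sqrt{2\pi}} \exp\left(-\frac{1}{2}\left(\frac{x - x_i}{h}\right)^2\right) \f]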
     150             :   !!
     151             :   !!        \b Example
     152             :   !!        \code{.f90}
     153             :   !!        ! given data, e.g. temperature
     154             :   !!        x = (/ 26.1_dp, 24.5_dp, 24.8_dp, 24.5_dp, 24.1_dp /)
     155             :   !!
     156             :   !!        ! estimate bandwidth via cross-validation (time-consuming)
     157             :   !!        h = kernel_density_h(x,silverman=.false.)
     158             :   !!
      159             :   !!        ! estimate bandwidth with Silverman's rule of thumb (default)
      160             :   !!        h = kernel_density_h(x,silverman=.true.)
      161             :   !!
      162             :   !!        ! calc probability density with the estimated bandwidth h at given output points xout
      163             :   !!        pdf = kernel_density(x, h=h, xout=xout)
      164             :   !!        ! gives (probability) density at either xout values, if specified, or at x values, if xout is not present
      165             :   !!        ! if bandwidth h is given                : silverman (true/false) is ignored
      166             :   !!        ! if silverman=.true.  and h not present : bandwidth will be estimated using Silverman's rule of thumb
      167             :   !!        ! if silverman=.false. and h not present : bandwidth will be estimated using the Cross-Validation approach
     168             :   !!        \endcode
     169             :   !!
     170             :   !!        -> see also example in test directory
     171             :   !!
     172             :   !!        \b Literature
     173             :   !!        1. Scott, D. W., & Sain, S. R. (2005).
     174             :   !!             Multi-dimensional Density Estimation. Handbook of Statistics, 24, 229-261.
     175             :   !!             doi:10.1016/S0169-7161(04)24009-3
     176             :   !!        2. Haerdle, W., & Mueller, M. (2000). Multivariate and semiparametric kernel regression.
     177             :   !!            In M. G. Schimek (Ed.), Smoothing and regression: Approaches, computation, and
     178             :   !!             application (pp. 357-392). Hoboken, NJ, USA: John Wiley & Sons, Inc. doi:10.1002/9781118150658.ch12s,
     179             :   !!             Cambridge University Press, UK, 1996
     180             :   !!
     181             :   !>       \param[in] "real(sp/dp) :: x(:)"        \f$ x_i \f$ 1D-array with data points
      182             :   !>       \param[in] "real(sp/dp), optional :: h"       Bandwidth of the kernel.\n
      183             :   !!                                                     If present, argument silverman is ignored.
      184             :   !!                                                     If not present, the bandwidth will be approximated first.
      185             :   !>       \param[in] "logical, optional :: silverman"   By default Silverman's rule of thumb will be used to approximate
      186             :   !!                                                     the bandwidth of the kernel (silverman=true).
      187             :   !!                                                     If silverman=false the Cross-Validation approach is used
      188             :   !!                                                     to estimate the bandwidth.
      189             :   !>       \param[in] "real(sp/dp), optional :: xout(:)" If present, the PDF will be approximated at these points,
      190             :   !!                                                     otherwise the PDF is approximated at x.
      191             :   !>       \param[in] "logical, optional :: mask(:)"     Mask of x values to use in the calculation.\n
      192             :   !!                                                     If xout is not given, masked entries get the nodata value.
      193             :   !>       \param[in] "real(sp/dp), optional :: nodata"  If mask is given but xout is not, masked data points
      194             :   !!                                                     get nodata as their kernel estimate.
     195             :   !>       \return     real(sp/dp), allocatable :: kernel_density(:) — smoothed PDF at either x or xout
     196             : 
     197             :   !>       \note Data need to be one-dimensional. Multi-dimensional data handling not implemented yet.
     198             : 
     199             :   !>        \author Juliane Mai
     200             :   !>        \date Mar 2013
     201             :   !>        \author Stephan Thober
     202             :   !>        \date Mar 2013
     203             :   !!              - mask and nodata
     204             :   !>        \author Matthias Cuntz
     205             :   !>        \date Mar 2013
     206             :   INTERFACE kernel_density
     207             :      MODULE PROCEDURE kernel_density_1d_dp,  kernel_density_1d_sp
     208             :   END INTERFACE kernel_density
     209             : 
     210             : 
     211             :   ! ------------------------------------------------------------------
      212             :   !>        \brief   Approximates the bandwidth h of the kernel.
      213             :   !>        \details Approximates the bandwidth h of the kernel for a given dataset x
      214             :   !!                 either using Silverman's rule of thumb or a cross-validation method.\n
      215             :   !!
      216             :   !!        By default the bandwidth h is approximated by Silverman's rule of thumb
      217             :   !!               \f[ h = \left(\frac{4}{3}\right)^{0.2} n^{-0.2} \sigma_x \f]
      218             :   !!        where n is the number of given data points and \f$ \sigma_x \f$ is the standard deviation of the data.\n
      219             :   !!        If the optional argument silverman is set to false, the cross-validation method described
      220             :   !!        by Scott et al. (2005) is applied.
      221             :   !!        In that case, the likelihood of a given h is maximized using the Nelder-Mead algorithm nelminrange.
      222             :   !!        For large data sets, the cross-validation might be time consuming and can be done
      223             :   !!        once beforehand with this function.\n
     224             :   !!        The dataset x can be single or double precision. The result will have the same numerical precision.\n
     225             :   !!        The result of this function can be used as the optional input for kernel_density and kernel_cumdensity.
     226             :   !!
     227             :   !!        \b Example
     228             :   !!        \code{.f90}
     229             :   !!        ! given data, e.g. temperature
     230             :   !!        x = (/ 26.1_dp, 24.5_dp, 24.8_dp, 24.5_dp, 24.1_dp /)
     231             :   !!
     232             :   !!        ! estimate bandwidth via cross-validation (time-consuming)
     233             :   !!        h = kernel_density_h(x,silverman=.false.)
     234             :   !!
      235             :   !!        ! estimate bandwidth with Silverman's rule of thumb (default)
     236             :   !!        h = kernel_density_h(x,silverman=.true.)
     237             :   !!        \endcode
     238             :   !!
     239             :   !!        -> see also example in test directory
     240             :   !!
     241             :   !!        \b Literature
     242             :   !!        1. Scott, D. W., & Sain, S. R. (2005).
     243             :   !!             Multi-dimensional Density Estimation. Handbook of Statistics, 24, 229-261.
     244             :   !!             doi:10.1016/S0169-7161(04)24009-3
     245             :   !!        2. Haerdle, W., & Mueller, M. (2000). Multivariate and semiparametric kernel regression.
     246             :   !!            In M. G. Schimek (Ed.), Smoothing and regression: Approaches, computation, and
     247             :   !!             application (pp. 357-392). Hoboken, NJ, USA: John Wiley & Sons, Inc. doi:10.1002/9781118150658.ch12s,
     248             :   !!             Cambridge University Press, UK, 1996
     249             :   !!
     250             :   !>       \param[in] "real(sp/dp) :: x(:)"        \f$ x_i \f$ 1D-array with data points
      251             :   !>       \param[in] "logical, optional :: silverman"   By default Silverman's rule of thumb will be used to approximate
      252             :   !!                                                     the bandwidth of the kernel (silverman=true).
     253             :   !!                                                     If silverman=false the Cross-Validation approach is used
     254             :   !!                                                     to estimate the bandwidth.
     255             :   !>       \param[in] "logical, optional :: mask(:)"     mask x values at calculation.
      256             :   !>       \return     real(sp/dp) :: kernel_density_h — approximated bandwidth h for the kernel smoother
     257             : 
     258             :   !>       \note Data need to be one-dimensional. Multi-dimensional data handling not implemented yet.
     259             : 
     260             :   !>       \authors Juliane Mai, Matthias Cuntz
     261             :   !>       \date Mar 2013
     262             :   INTERFACE kernel_density_h
     263             :      MODULE PROCEDURE kernel_density_h_1d_dp, kernel_density_h_1d_sp
     264             :   END INTERFACE kernel_density_h
     265             : 
     266             : 
     267             :   ! ------------------------------------------------------------------
     268             :   !>        \brief   Multi-dimensional non-parametric kernel regression.
     269             :   !>        \details Multi-dimensional non-parametric kernel regression using a Gaussian kernel.
     270             :   !!
      271             :   !!        The bandwidth of the kernel can be pre-determined using the function kernel_regression_h and specified
      272             :   !!        by the optional argument h.
      273             :   !!        If h is not given, the default method to approximate the bandwidth h is Silverman's rule of thumb
      274             :   !!               \f[ h = \left(\frac{4}{d+2}\right)^{1/(d+4)} n^{-1/(d+4)} \sigma_{x_i} \f]
      275             :   !!        where \f$ n \f$ is the number of given data points, \f$ d \f$ is the number of dimensions,
      276             :   !!        and \f$ \sigma_{x_i} \f$ is the standard deviation of the data of dimension \f$ i \f$.\n
      277             :   !!        If the optional argument silverman is set to false, the cross-validation method described
      278             :   !!        by Scott et al. (2005) is applied.
      279             :   !!        In that case, the likelihood of a given h is maximized using the Nelder-Mead algorithm nelminrange.
      280             :   !!        For large data sets this might be time consuming and should be performed beforehand
      281             :   !!        using the function kernel_regression_h.\n
      282             :   !!        The dataset (x,y) can be single or double precision. The result will have the same numerical precision.\n
      283             :   !!        If the regression is needed at points other than x, the optional argument xout has to be specified.
      284             :   !!        The result will then be of the same size and precision as xout.\n
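                      :   !!
                      :   !!        For reference, the underlying Nadaraya-Watson estimator reads, in its usual one-dimensional form,
                      :   !!               \f[ \hat{y}(x) = \frac{\sum_{i=1}^{n} K\left(\frac{x - x_i}{h}\right) y_i}
                      :   !!                                     {\sum_{i=1}^{n} K\left(\frac{x - x_i}{h}\right)} \f]
                      :   !!        with a Gaussian kernel \f$ K \f$.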
     285             :   !!
     286             :   !!        \b Example
      287             :   !!        The code is adapted from the kernel_regression.py of the jams_python library.
      288             :   !!        \code{.f90}
     289             :   !!        ! given data, e.g. temperature
     290             :   !!        x(:,1) = (/ 26.1_dp, 24.5_dp, 24.8_dp, 14.5_dp,  2.1_dp /)
     291             :   !!        x(:,2) = (/  2.1_dp,  4.5_dp,  6.8_dp,  4.8_dp,  0.1_dp /)
     292             :   !!        y      = (/ 28.2_dp, 29.0_dp, 31.6_dp, 19.3_dp,  2.2_dp /)
     293             :   !!
     294             :   !!        ! estimate bandwidth via cross-validation (time-consuming)
     295             :   !!        h = kernel_regression_h(x,y,silverman=.false.)
     296             :   !!
      297             :   !!        ! estimate bandwidth with Silverman's rule of thumb (default)
     298             :   !!        h = kernel_regression_h(x,y,silverman=.true.)
     299             :   !!
     300             :   !!        fit_y = kernel_regression(x, y, h=h, silverman=.false., xout=xout)
     301             :   !!        ! gives fitted values at either xout values, if specified, or at x values, if xout is not present
      302             :   !!        ! if bandwidth h is given                : silverman (true/false) is ignored
      303             :   !!        ! if silverman=.true.  and h not present : bandwidth will be estimated using Silverman's rule of thumb
      304             :   !!        ! if silverman=.false. and h not present : bandwidth will be estimated using the Cross-Validation approach
     305             :   !!        \endcode
     306             :   !!
     307             :   !!        -> see also example in test directory
     308             :   !!
     309             :   !!        \b Literature
     310             :   !!        1. Scott, D. W., & Sain, S. R. (2005).
     311             :   !!             Multi-dimensional Density Estimation. Handbook of Statistics, 24, 229-261.
     312             :   !!             doi:10.1016/S0169-7161(04)24009-3
     313             :   !!        2. Haerdle, W., & Mueller, M. (2000). Multivariate and semiparametric kernel regression.
     314             :   !!            In M. G. Schimek (Ed.), Smoothing and regression: Approaches, computation, and
     315             :   !!             application (pp. 357-392). Hoboken, NJ, USA: John Wiley & Sons, Inc. doi:10.1002/9781118150658.ch12s,
     316             :   !!             Cambridge University Press, UK, 1996
     317             :   !!
     318             :   !>        \param[in] "real(sp/dp) :: x(:)/x(:,:)"      1D or ND array with independent variables
      319             :   !>        \param[in] "real(sp/dp) :: y(:)"             1D array of dependent variables y(i) = f(x(i,:)) with unknown f
      320             :   !>        \param[in] "real(sp/dp), optional :: h"      Bandwidth of the kernel.\n
      321             :   !!                                                     If present, argument silverman is ignored.
      322             :   !!                                                     If not present, the bandwidth will be approximated first.
      323             :   !>        \param[in] "logical, optional :: silverman"   By default Silverman's rule of thumb will be used to approximate the
      324             :   !!                                                     bandwidth of the kernel (silverman=true).
      325             :   !!                                                     If silverman=false the Cross-Validation approach is used
      326             :   !!                                                     to estimate the bandwidth.
      327             :   !>        \param[in] "real(sp/dp), optional :: xout(:)/xout(:,:)"
      328             :   !!                                                     If present, the fitted values will be returned for
      329             :   !!                                                     these independent variables,
      330             :   !!                                                     otherwise the fitted values at x are returned.
      331             :   !>        \param[in] "logical, optional :: mask(:)"     Mask of y values to use in the calculation.\n
      332             :   !!                                                     If xout is not given, masked entries get the nodata value.
      333             :   !>        \param[in] "real(sp/dp), optional :: nodata"  If mask is given but xout is not, masked data points
      334             :   !!                                                     get nodata as their kernel estimate.
      335             :   !>        \return     real(sp/dp), allocatable :: kernel_regression(:) — fitted values at either x or xout
     336             : 
     337             :   !>        \authors Matthias Cuntz, Juliane Mai
     338             :   !>        \date Mar 2013
     339             :   INTERFACE kernel_regression
     340             :      MODULE PROCEDURE kernel_regression_2d_dp, kernel_regression_2d_sp, &
     341             :           kernel_regression_1d_dp, kernel_regression_1d_sp
     342             :   END INTERFACE kernel_regression
     343             : 
     344             : 
     345             :   ! ------------------------------------------------------------------
      346             :   !>        \brief   Approximates the bandwidth h of the kernel for regression.
      347             :   !>        \details  Approximates the bandwidth h of the kernel for a given dataset x
      348             :   !!                  either using Silverman's rule of thumb or a cross-validation method.\n
      349             :   !!
      350             :   !!        By default the bandwidth h is approximated by Silverman's rule of thumb
      351             :   !!               \f[ h = \left(\frac{4}{d+2}\right)^{1/(d+4)} n^{-1/(d+4)} \sigma_{x_i} \f]
     352             :   !!        where \f$ n \f$ is the number of given data points, \f$ d \f$ is the number of dimensions,
     353             :   !!        and \f$ \sigma_{x_i} \f$ is the standard deviation of the data of dimension \f$ i \f$.\n
     354             :   !!        If the optional argument silverman is set to false, the cross-validation method described
     355             :   !!        by Scott et al. (2005) is applied.
      356             :   !!        In that case, the likelihood of a given h is maximized using the Nelder-Mead algorithm nelminrange.
      357             :   !!        For large data sets, the cross-validation might be time consuming and can be done once beforehand with this function.\n
     358             :   !!        The dataset x can be single or double precision. The result will have the same numerical precision.\n
     359             :   !!        The result of this function can be used as the optional input for kernel_regression.\n
     360             :   !!
     361             :   !!        The code is adapted from the kernel_regression.py of the UFZ CHS Python library.
     362             :   !!
     363             :   !!        \b Example
     364             :   !!        \code{.f90}
     365             :   !!        ! given data, e.g. temperature
     366             :   !!        x(:,1) = (/ 26.1_dp, 24.5_dp, 24.8_dp, 14.5_dp,  2.1_dp /)
     367             :   !!        x(:,2) = (/  2.1_dp,  4.5_dp,  6.8_dp,  4.8_dp,  0.1_dp /)
     368             :   !!        y      = (/ 28.2_dp, 29.0_dp, 31.6_dp, 19.3_dp,  2.2_dp /)
     369             :   !!
     370             :   !!        ! estimate bandwidth via cross-validation (time-consuming)
     371             :   !!        h = kernel_regression_h(x,y,silverman=.false.)
     372             :   !!
      373             :   !!        ! estimate bandwidth with Silverman's rule of thumb (default)
     374             :   !!        h = kernel_regression_h(x,y,silverman=.true.)
     375             :   !!        \endcode
     376             :   !!
     377             :   !!        -> see also example in test directory
     378             :   !!
     379             :   !!        \b Literature
     380             :   !!        1. Scott, D. W., & Sain, S. R. (2005).
     381             :   !!             Multi-dimensional Density Estimation. Handbook of Statistics, 24, 229-261.
     382             :   !!             doi:10.1016/S0169-7161(04)24009-3
     383             :   !!        2. Haerdle, W., & Mueller, M. (2000). Multivariate and semiparametric kernel regression.
     384             :   !!            In M. G. Schimek (Ed.), Smoothing and regression: Approaches, computation, and
     385             :   !!             application (pp. 357-392). Hoboken, NJ, USA: John Wiley & Sons, Inc. doi:10.1002/9781118150658.ch12s,
     386             :   !!             Cambridge University Press, UK, 1996
     387             :   !!
     388             :   !>        \param[in] "real(sp/dp) :: x(:)/x(:,:)"  1D or ND array with independent variables
      389             :   !>        \param[in] "real(sp/dp) :: y(:)"         1D array of dependent variables y(i) = f(x(i,:)) with unknown f
      390             :   !>        \param[in] "logical, optional :: silverman"   By default Silverman's rule of thumb will be used to approximate the
      391             :   !!                                                      bandwidth of the kernel (silverman=true).
     392             :   !!                                                      If silverman=false the Cross-Validation approach is used
     393             :   !!                                                      to estimate the bandwidth.
     394             :   !>        \param[in] "logical, optional :: mask(:)"     mask x values at calculation.
      395             :   !>        \return     real(sp/dp), allocatable :: kernel_regression_h(:) — approximated bandwidth h for kernel regression\n
     396             :   !!                                                                               number of bandwidths equals
     397             :   !!                                                                               number of independent variables
     398             : 
     399             :   !>        \authors Matthias Cuntz, Juliane Mai
     400             :   !>        \date Mar 2013
     401             :   INTERFACE kernel_regression_h
     402             :      MODULE PROCEDURE kernel_regression_h_2d_dp, kernel_regression_h_2d_sp, &
     403             :           kernel_regression_h_1d_dp, kernel_regression_h_1d_sp
     404             :   END INTERFACE kernel_regression_h
     405             : 
     406             : 
     407             :   ! ------------------------------------------------------------------
     408             : 
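                      :   ! Private helper interfaces (brief orientation only; see the individual procedures for details):
                      :   !   nadaraya_watson         - kernel-weighted (Nadaraya-Watson) sums for density and regression
                      :   !   allocate_globals        - copy data into the module variables below for the cross-validation objectives
                      :   !   cross_valid_density,
                      :   !   cross_valid_regression  - cross-validation objective functions for the bandwidth
                      :   !   mesh                    - regular mesh of support points
                      :   !   golden                  - golden-section minimisation
                      :   !   trapzd, polint          - trapezoidal refinement and polynomial extrapolation for Romberg integration
                      : 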
     409             :   INTERFACE nadaraya_watson
     410             :      MODULE PROCEDURE nadaraya_watson_2d_dp, nadaraya_watson_2d_sp, &
     411             :           nadaraya_watson_1d_dp, nadaraya_watson_1d_sp
     412             :   END INTERFACE nadaraya_watson
     413             : 
     414             :   INTERFACE allocate_globals
     415             :      MODULE PROCEDURE allocate_globals_2d_dp, allocate_globals_2d_sp, &
     416             :           allocate_globals_1d_dp, allocate_globals_1d_sp
     417             :   END INTERFACE allocate_globals
     418             : 
     419             :   INTERFACE cross_valid_density
     420             :      MODULE PROCEDURE cross_valid_density_1d_dp, cross_valid_density_1d_sp
     421             :   END INTERFACE cross_valid_density
     422             : 
     423             :   INTERFACE cross_valid_regression
     424             :      MODULE PROCEDURE cross_valid_regression_dp, cross_valid_regression_sp
     425             :   END INTERFACE cross_valid_regression
     426             : 
     427             :   INTERFACE mesh
     428             :      MODULE PROCEDURE mesh_dp, mesh_sp
     429             :   END INTERFACE mesh
     430             : 
     431             :   INTERFACE golden
     432             :      MODULE PROCEDURE golden_dp, golden_sp
     433             :   END INTERFACE golden
     434             : 
     435             :   INTERFACE trapzd
     436             :      MODULE PROCEDURE trapzd_dp, trapzd_sp
     437             :   END INTERFACE trapzd
     438             : 
     439             :   INTERFACE polint
     440             :      MODULE PROCEDURE polint_dp, polint_sp
     441             :   END INTERFACE polint
     442             : 
     443             :   PRIVATE
     444             : 
      445             :   ! Module variables which need to be public for optimization of bandwidth via cross-validation
     446             :   real(sp), dimension(:,:), allocatable :: global_x_sp
     447             :   real(sp), dimension(:,:), allocatable :: global_xout_sp
     448             :   real(sp), dimension(:),   allocatable :: global_y_sp
     449             : 
     450             :   real(dp), dimension(:,:), allocatable :: global_x_dp
     451             :   real(dp), dimension(:,:), allocatable :: global_xout_dp
     452             :   real(dp), dimension(:),   allocatable :: global_y_dp
     453             :   !$OMP threadprivate(global_x_sp,global_xout_sp,global_y_sp,global_x_dp,global_xout_dp,global_y_dp)
     454             : 
     455             : 
     456             :   ! ------------------------------------------------------------------------------------------------
     457             : 
     458             : CONTAINS
     459             : 
     460           3 :   function kernel_cumdensity_1d_dp(ix, h, silverman, xout, romberg, nintegrate, epsint, mask, nodata)
     461             : 
     462             :     use mo_utils,     only: le, linspace
     463             :     ! use mo_quicksort, only: qsort_index !ST: may lead to Segmentation Fault for large arrays > 600 000 entries
     464             :     ! use mo_sort,      only: sort_index
     465             :     use mo_orderpack, only: sort_index ! MC: use orderpack for no NR until somebody complains
     466             :     use mo_integrate, only: int_regular
     467             : 
     468             :     implicit none
     469             : 
     470             :     real(dp), dimension(:),                       intent(in) :: ix
     471             :     real(dp),                           optional, intent(in) :: h
     472             :     logical,                            optional, intent(in) :: silverman
     473             :     real(dp), dimension(:),             optional, intent(in) :: xout
     474             :     logical,                            optional, intent(in) :: romberg
     475             :     integer(i4),                        optional, intent(in) :: nintegrate
     476             :     real(dp),                           optional, intent(in) :: epsint
     477             :     logical,  dimension(:),             optional, intent(in) :: mask
     478             :     real(dp),                           optional, intent(in) :: nodata
     479             :     real(dp), dimension(:), allocatable                      :: kernel_cumdensity_1d_dp
     480             : 
     481             :     ! local variables
     482             :     integer(i4)                            :: nin, nout
     483             :     integer(i4)                            :: ii, jj
     484           1 :     real(dp)                               :: hh      ! bandwidth
     485           1 :     real(dp),    dimension(:), allocatable :: xxout
     486           1 :     integer(i4), dimension(:), allocatable :: xindx
     487             :     ! real(dp)                               :: tmp
     488           1 :     real(dp),    dimension(:), allocatable :: x
     489             :     ! integration
      490             :     logical                :: doromberg                ! Romberg or Newton-Cotes
     491           1 :     real(dp),  dimension(:), allocatable :: kernel_pdf ! kernel densities at xout
     492             :     integer(i4)            :: trapzmax                 ! maximum 2**trapzmax points per integration between xouts
     493           1 :     real(dp)               :: trapzreps                ! maximum relative integration error
     494             :     integer(i4), parameter :: kromb = 5                ! Romberg's method of order 2kromb
     495           1 :     real(dp)               :: a, b, k1, k2             ! integral limits and corresponding kernel densities
      496           2 :     real(dp)               :: qromb, dqromb            ! interpolated result and its change between consecutive calls to trapzd
     497           1 :     real(dp), dimension(:), allocatable :: s, hs       ! results and stepsize of consecutive calls to trapzd
     498           1 :     real(dp)               :: lower_x                  ! lowest support point at min(x) - 3 stddev(x)
     499           2 :     real(dp), dimension(1) :: klower_x                 ! kernel density estimate at lower_x
      500           1 :     real(dp)               :: delta                    ! stepsize for Newton-Cotes
     501             : 
     502             :     ! consistency check - mask needs either nodata or xout
     503           1 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
     504           0 :        stop 'kernel_cumdensity_1d_dp: missing nodata value or xout with present(mask)'
     505             :     end if
     506             : 
     507             :     ! Pack if mask present
     508           1 :     if (present(mask)) then
     509           0 :        nin = count(mask)
     510           0 :        allocate(x(nin))
     511           0 :        x = pack(ix, mask)
     512             :     else
     513           1 :        nin = size(ix,1)
     514           3 :        allocate(x(nin))
     515          22 :        x = ix
     516             :     endif
     517             : 
     518             :     ! allocate
     519           1 :     if (present(xout)) then
     520           0 :        nout = size(xout,1)
     521           0 :        allocate(xxout(nout))
     522           0 :        allocate(xindx(nout))
     523           0 :        xxout = xout
     524             :     else
     525           1 :        nout = nin
     526           3 :        allocate(xxout(nout))
     527           3 :        allocate(xindx(nout))
     528          22 :        xxout = x
     529             :     end if
     530             :     ! sort the x
     531           1 :     xindx = sort_index(xxout)
     532          42 :     xxout = xxout(xindx)
     533             : 
     534           1 :     if (present(romberg)) then
     535           1 :        doromberg = romberg
     536             :     else
     537             :        doromberg = .false.
     538             :     end if
     539             : 
     540             :     ! maximum 2**nintegrate points for Romberg integration; (4*n+1) points for 5-point Newton-Cotes
     541           1 :     if (present(nintegrate)) then
     542           0 :        trapzmax = nintegrate
     543             :     else
     544           1 :        if (doromberg) then
     545           0 :           trapzmax = 10_i4
     546             :        else
     547           1 :           trapzmax = 101_i4
     548             :        endif
     549             :     endif
     550             : 
     551             :     ! maximum relative error for integration
     552           1 :     if (present(epsint)) then
     553           0 :        trapzreps = epsint
     554             :     else
     555             :        trapzreps = 1.0e-6_dp
     556             :     endif
     557             : 
     558             :     ! determine h
     559           1 :     if (present(h)) then
     560           1 :        hh = h
     561             :     else
     562           0 :        if (present(silverman)) then
     563           0 :           hh = kernel_density_h(x, silverman=silverman)
     564             :        else
     565           0 :           hh = kernel_density_h(x, silverman=.true.)
     566             :        end if
     567             :     end if
     568             : 
     569             :     ! cumulative integration of pdf with Simpson's and trapezoidal rules as in Numerical recipes (qsimp)
     570             :     ! We do the i=1 step of trapzd ourselves to save kernel evaluations
     571           3 :     allocate(kernel_pdf(nout))
     572           2 :     allocate(kernel_cumdensity_1d_dp(nout))
     573             : 
     574          22 :     lower_x  = minval(x) - 3.0_dp * stddev(x)
     575             : 
     576           1 :     if (doromberg) then
     577           0 :        allocate(s(trapzmax+1), hs(trapzmax+1))
     578             : 
     579           0 :        kernel_pdf = kernel_density(x, hh, xout=xxout)
     580           0 :        klower_x   = kernel_density(x, hh, xout=(/lower_x/))
     581             : 
     582             :        ! loop through all regression points
     583           0 :        do ii = 1, nout
     584           0 :           if (ii==1) then
     585           0 :              a  = lower_x
     586           0 :              k1 = klower_x(1)
     587             :           else
     588           0 :              a  = xxout(ii-1)
     589           0 :              k1 = kernel_pdf(ii-1)
     590             :           endif
     591           0 :           b  = xxout(ii)
     592           0 :           k2 = kernel_pdf(ii)
     593             :           ! Romberg integration
     594             :           ! first trapzd manually using kernel_pdf above
     595           0 :           jj       = 1
     596           0 :           s(jj)    = 0.5_dp * (b-a) * (k1+k2)
     597           0 :           hs(jj)   = 1.0_dp
     598           0 :           s(jj+1)  = s(jj)
     599           0 :           hs(jj+1) = 0.25_dp*hs(jj)
     600           0 :           do jj=2, trapzmax
     601           0 :              call trapzd(kernel_density_1d_dp, x, hh, a, b, s(jj), jj)
     602           0 :              if (jj >= kromb) then
     603           0 :                 call polint(hs(jj-kromb+1:jj), s(jj-kromb+1:jj), 0.0_dp, qromb, dqromb)
     604           0 :                 if (le(abs(dqromb),trapzreps*abs(qromb))) exit
     605             :              end if
     606           0 :              s(jj+1)  = s(jj)
     607           0 :              hs(jj+1) = 0.25_dp*hs(jj)
     608             :           end do
     609           0 :           if (ii==1) then
     610           0 :              kernel_cumdensity_1d_dp(ii) = qromb
     611             :           else
     612           0 :              kernel_cumdensity_1d_dp(ii) = kernel_cumdensity_1d_dp(ii-1) + qromb
     613             :           endif
     614             :        end do
     615             : 
     616           0 :        deallocate(s, hs)
     617             :     else
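                      :        ! Newton-Cotes branch: evaluate the kernel PDF on trapzmax equally spaced points between
                      :        ! consecutive (sorted) output points, integrate each interval with int_regular
                      :        ! (5-point Newton-Cotes), and accumulate the pieces to obtain the CDF.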
     618             :        ! loop through all regression points
     619          21 :        do ii = 1, nout
     620          21 :           if (ii .eq. 1_i4) then
     621           1 :              delta                       = (xxout(ii)-lower_x) / real(trapzmax-1,dp)
     622         103 :              kernel_pdf                  = kernel_density(x, hh, xout=linspace(lower_x, xxout(ii), trapzmax))
     623           1 :              kernel_cumdensity_1d_dp(1)  = int_regular(kernel_pdf, delta)
     624             :           else
     625          19 :              delta                       = (xxout(ii)-xxout(ii-1)) / real(trapzmax-1,dp)
     626        1957 :              kernel_pdf                  = kernel_density(x, hh, xout=linspace(xxout(ii-1), xxout(ii), trapzmax))
     627          19 :              kernel_cumdensity_1d_dp(ii) = kernel_cumdensity_1d_dp(ii-1) + int_regular(kernel_pdf, delta)
     628             :           end if
     629             :        end do
     630             :     endif
     631             : 
     632             :     ! scale to range [0,1]
     633             :     ! tmp = 1.0_dp / (kernel_cumdensity_1d_dp(nout) - kernel_cumdensity_1d_dp(1))
     634             :     ! kernel_cumdensity_1d_dp(:) = ( kernel_cumdensity_1d_dp(:) - kernel_cumdensity_1d_dp(1) ) * tmp
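                      :     ! clip at 1: the integration starts at the finite lower_x and is numerical, so the
                      :     ! accumulated CDF can slightly exceed 1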
     635          21 :     kernel_cumdensity_1d_dp = min(kernel_cumdensity_1d_dp, 1.0_dp)
     636             : 
     637             :     ! resorting
     638          41 :     kernel_cumdensity_1d_dp(xindx) = kernel_cumdensity_1d_dp(:)
     639             : 
     640             :     ! check whether output has to be unpacked
     641           1 :     if (present(mask) .and. (.not. present(xout))) then
     642           0 :        deallocate(x)
     643           0 :        nin = size(ix,1)
     644           0 :        allocate(x(nin))
     645           0 :        x = unpack(kernel_cumdensity_1d_dp, mask, nodata)
     646           0 :        deallocate(kernel_cumdensity_1d_dp)
     647           0 :        allocate(kernel_cumdensity_1d_dp(nin))
     648           0 :        kernel_cumdensity_1d_dp = x
     649             :     end if
     650             : 
     651             :     ! clean up
     652           1 :     deallocate(xxout)
     653           1 :     deallocate(xindx)
     654           1 :     deallocate(kernel_pdf)
     655           1 :     deallocate(x)
     656             : 
     657           1 :   end function kernel_cumdensity_1d_dp
     658             : 
     659           0 :   function kernel_cumdensity_1d_sp(ix, h, silverman, xout, romberg, nintegrate, epsint, mask, nodata)
     660             : 
     661           1 :     use mo_utils,     only: le, linspace
     662             :     ! use mo_quicksort, only: qsort_index !ST: may lead to Segmentation Fault for large arrays > 600 000 entries
     663             :     ! use mo_sort,      only: sort_index
     664             :     use mo_orderpack, only: sort_index ! MC: use orderpack for no NR until somebody complains
     665             :     use mo_integrate, only: int_regular
     666             : 
     667             :     implicit none
     668             : 
     669             :     real(sp), dimension(:),                       intent(in) :: ix
     670             :     real(sp),                           optional, intent(in) :: h
     671             :     logical,                            optional, intent(in) :: silverman
     672             :     real(sp), dimension(:),             optional, intent(in) :: xout
     673             :     logical,                            optional, intent(in) :: romberg
     674             :     integer(i4),                        optional, intent(in) :: nintegrate
     675             :     real(sp),                           optional, intent(in) :: epsint
     676             :     logical,  dimension(:),             optional, intent(in) :: mask
     677             :     real(sp),                           optional, intent(in) :: nodata
     678             :     real(sp), dimension(:), allocatable                      :: kernel_cumdensity_1d_sp
     679             : 
     680             :     ! local variables
     681             :     integer(i4)                            :: nin, nout
     682             :     integer(i4)                            :: ii, jj
     683           0 :     real(sp)                               :: hh      ! bandwidth
     684           0 :     real(sp),    dimension(:), allocatable :: xxout
     685           0 :     integer(i4), dimension(:), allocatable :: xindx
     686             :     ! real(sp)                               :: tmp
     687           0 :     real(sp),    dimension(:), allocatable :: x
     688             :     ! integration
      689             :     logical                :: doromberg                ! Romberg or Newton-Cotes
     690           0 :     real(sp),  dimension(:), allocatable :: kernel_pdf ! kernel densities at xout
     691             :     integer(i4)            :: trapzmax                 ! maximum 2**trapzmax points per integration between xouts
     692           0 :     real(sp)               :: trapzreps                ! maximum relative integration error
     693             :     integer(i4), parameter :: kromb = 5                ! Romberg's method of order 2kromb
     694           0 :     real(sp)               :: a, b, k1, k2             ! integral limits and corresponding kernel densities
      695           0 :     real(sp)               :: qromb, dqromb            ! interpolated result and its change between consecutive calls to trapzd
     696           0 :     real(sp), dimension(:), allocatable :: s, hs       ! results and stepsize of consecutive calls to trapzd
     697           0 :     real(sp)               :: lower_x                  ! lowest support point at min(x) - 3 stddev(x)
     698           0 :     real(sp), dimension(1) :: klower_x                 ! kernel density estimate at lower_x
      699           0 :     real(sp)               :: delta                    ! stepsize for Newton-Cotes
     700             : 
     701             :     ! consistency check - mask needs either nodata or xout
     702           0 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
     703           0 :        stop 'kernel_cumdensity_1d_sp: missing nodata value or xout with present(mask)'
     704             :     end if
     705             : 
     706             :     ! Pack if mask present
     707           0 :     if (present(mask)) then
     708           0 :        nin = count(mask)
     709           0 :        allocate(x(nin))
     710           0 :        x = pack(ix, mask)
     711             :     else
     712           0 :        nin = size(ix,1)
     713           0 :        allocate(x(nin))
     714           0 :        x = ix
     715             :     endif
     716             : 
     717             :     ! allocate
     718           0 :     if (present(xout)) then
     719           0 :        nout = size(xout,1)
     720           0 :        allocate(xxout(nout))
     721           0 :        allocate(xindx(nout))
     722           0 :        xxout = xout
     723             :     else
     724           0 :        nout = nin
     725           0 :        allocate(xxout(nout))
     726           0 :        allocate(xindx(nout))
     727           0 :        xxout = x
     728             :     end if
     729             :     ! sort the x
     730           0 :     xindx = sort_index(xxout)
     731           0 :     xxout = xxout(xindx)
     732             : 
     733           0 :     if (present(romberg)) then
     734           0 :        doromberg = romberg
     735             :     else
     736             :        doromberg = .false.
     737             :     end if
     738             : 
     739             :     ! maximum 2**nintegrate points for Romberg integration; (4*n+1) points for 5-point Newton-Cotes
     740           0 :     if (present(nintegrate)) then
     741           0 :        trapzmax = nintegrate
     742             :     else
     743           0 :        if (doromberg) then
     744           0 :           trapzmax = 10_i4
     745             :        else
     746           0 :           trapzmax = 101_i4
     747             :        endif
     748             :     endif
     749             : 
     750             :     ! maximum relative error for integration
     751           0 :     if (present(epsint)) then
     752           0 :        trapzreps = epsint
     753             :     else
     754             :        trapzreps = 1.0e-6_sp
     755             :     endif
     756             : 
     757             :     ! determine h
     758           0 :     if (present(h)) then
     759           0 :        hh = h
     760             :     else
     761           0 :        if (present(silverman)) then
     762           0 :           hh = kernel_density_h(x, silverman=silverman)
     763             :        else
     764           0 :           hh = kernel_density_h(x, silverman=.true.)
     765             :        end if
     766             :     end if
     767             : 
      768             :     ! cumulative integration of the pdf: Romberg integration (trapzd/polint as in Numerical Recipes) or 5-point Newton-Cotes
     769             :     ! We do the i=1 step of trapzd ourselves to save kernel evaluations
     770           0 :     allocate(kernel_pdf(nout))
     771           0 :     allocate(kernel_cumdensity_1d_sp(nout))
     772             : 
     773           0 :     if (doromberg) then
     774           0 :        allocate(s(trapzmax+1), hs(trapzmax+1))
     775             : 
     776           0 :        kernel_pdf = kernel_density(x, hh, xout=xxout)
     777             : 
     778           0 :        lower_x  = minval(x) - 3.0_sp * stddev(x)
     779           0 :        klower_x = kernel_density(x, hh, xout=(/lower_x/))
     780             : 
     781             :        ! loop through all regression points
     782           0 :        do ii = 1, nout
     783           0 :           if (ii==1) then
     784           0 :              a  = lower_x
     785           0 :              k1 = klower_x(1)
     786             :           else
     787           0 :              a  = xxout(ii-1)
     788           0 :              k1 = kernel_pdf(ii-1)
     789             :           endif
     790           0 :           b  = xxout(ii)
     791           0 :           k2 = kernel_pdf(ii)
     792             :           ! Romberg integration
      793             :           ! do the first trapzd step manually, reusing the kernel_pdf values computed above
     794           0 :           jj       = 1
     795           0 :           s(jj)    = 0.5_sp * (b-a) * (k1+k2)
     796           0 :           hs(jj)   = 1.0_sp
     797           0 :           s(jj+1)  = s(jj)
     798           0 :           hs(jj+1) = 0.25_sp*hs(jj)
     799           0 :           do jj=2, trapzmax
     800           0 :              call trapzd(kernel_density_1d_sp, x, hh, a, b, s(jj), jj)
     801           0 :              if (jj >= kromb) then
     802           0 :                 call polint(hs(jj-kromb+1:jj), s(jj-kromb+1:jj), 0.0_sp, qromb, dqromb)
     803           0 :                 if (le(abs(dqromb),trapzreps*abs(qromb))) exit
     804             :              end if
     805           0 :              s(jj+1)  = s(jj)
     806           0 :              hs(jj+1) = 0.25_sp*hs(jj)
     807             :           end do
     808           0 :           if (ii==1) then
     809           0 :              kernel_cumdensity_1d_sp(ii) = qromb
     810             :           else
     811           0 :              kernel_cumdensity_1d_sp(ii) = kernel_cumdensity_1d_sp(ii-1) + qromb
     812             :           endif
     813             :        end do
     814             : 
     815           0 :        deallocate(s, hs)
      816             :     else
      816           0 :        lower_x = minval(x) - 3.0_sp * stddev(x)   ! lowest support point; needed here since the Romberg branch above was skipped
      817             :        ! loop through all regression points
      818           0 :        do ii = 1, nout
     819           0 :           if (ii .eq. 1_i4) then
     820           0 :              delta                       = (xxout(ii)-lower_x) / real(trapzmax-1,sp)
     821           0 :              kernel_pdf                  = kernel_density(x, hh, xout=linspace(lower_x, xxout(ii), trapzmax))
     822           0 :              kernel_cumdensity_1d_sp(1)  = int_regular(kernel_pdf, delta)
     823             :           else
     824           0 :              delta                       = (xxout(ii)-xxout(ii-1)) / real(trapzmax-1,sp)
     825           0 :              kernel_pdf                  = kernel_density(x, hh, xout=linspace(xxout(ii-1), xxout(ii), trapzmax))
     826           0 :              kernel_cumdensity_1d_sp(ii) = kernel_cumdensity_1d_sp(ii-1) + int_regular(kernel_pdf, delta)
     827             :           end if
     828             :        end do
     829             :     endif
     830             : 
      831             :     ! limit to a maximum of 1 (the full rescaling to [0,1] below is left commented out)
     832             :     ! tmp = 1.0_sp / (kernel_cumdensity_1d_sp(nout) - kernel_cumdensity_1d_sp(1))
     833             :     ! kernel_cumdensity_1d_sp(:) = ( kernel_cumdensity_1d_sp(:) - kernel_cumdensity_1d_sp(1) ) * tmp
     834           0 :     kernel_cumdensity_1d_sp = min(kernel_cumdensity_1d_sp, 1.0_sp)
     835             : 
     836             :     ! resorting
     837           0 :     kernel_cumdensity_1d_sp(xindx) = kernel_cumdensity_1d_sp(:)
     838             : 
     839             :     ! check whether output has to be unpacked
     840           0 :     if (present(mask) .and. (.not. present(xout))) then
     841           0 :        deallocate(x)
     842           0 :        nin = size(ix,1)
     843           0 :        allocate(x(nin))
     844           0 :        x = unpack(kernel_cumdensity_1d_sp, mask, nodata)
     845           0 :        deallocate(kernel_cumdensity_1d_sp)
     846           0 :        allocate(kernel_cumdensity_1d_sp(nin))
     847           0 :        kernel_cumdensity_1d_sp = x
     848             :     end if
     849             : 
     850             :     ! clean up
     851           0 :     deallocate(xxout)
     852           0 :     deallocate(xindx)
     853           0 :     deallocate(kernel_pdf)
     854           0 :     deallocate(x)
     855             : 
     856           0 :   end function kernel_cumdensity_1d_sp
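A minimal usage sketch of the CDF smoother (the program name and data values below are made up; kernel_density_h and kernel_cumdensity are the public generics of this module, called with their optional keyword arguments):

program sketch_cumdensity
  use mo_kind,   only: dp
  use mo_kernel, only: kernel_density_h, kernel_cumdensity
  implicit none
  real(dp), dimension(8) :: x = (/ 0.1_dp, 0.3_dp, 0.35_dp, 0.5_dp, 0.6_dp, 0.8_dp, 0.9_dp, 1.2_dp /)
  real(dp), dimension(:), allocatable :: cdf
  real(dp) :: h
  ! estimate the bandwidth once and pass it in, which avoids re-estimating it inside the smoother
  h   = kernel_density_h(x, silverman=.true.)
  ! CDF at the data points themselves; Romberg integration instead of the 5-point Newton-Cotes default
  cdf = kernel_cumdensity(x, h=h, romberg=.true.)
  print *, cdf
end program sketch_cumdensity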
     857             : 
     858             : 
     859             :   ! ------------------------------------------------------------------------------------------------
     860             : 
     861          42 :   function kernel_density_1d_dp(ix, h, silverman, xout, mask, nodata)
     862             : 
     863             :     implicit none
     864             : 
     865             :     real(dp), dimension(:),                       intent(in) :: ix
     866             :     real(dp),                           optional, intent(in) :: h
     867             :     logical,                            optional, intent(in) :: silverman
     868             :     real(dp), dimension(:),             optional, intent(in) :: xout
     869             :     logical,  dimension(:),             optional, intent(in) :: mask
     870             :     real(dp),                           optional, intent(in) :: nodata
     871             :     real(dp), dimension(:), allocatable                      :: kernel_density_1d_dp
     872             : 
     873             :     ! local variables
     874             :     integer(i4)                         :: nin, nout
     875             :     integer(i4)                         :: ii
     876          21 :     real(dp)                            :: hh
     877          21 :     real(dp), dimension(:), allocatable :: xxout
     878          21 :     real(dp), dimension(:), allocatable :: z
     879          21 :     real(dp)                            :: multiplier
     880          21 :     real(dp)                            :: thresh
     881          21 :     real(dp), dimension(:),        allocatable :: x
     882             : 
     883             :     ! consistency check - mask needs either nodata or xout
     884          21 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
     885           0 :        stop 'kernel_density_1d_dp: missing nodata value or xout with present(mask)'
     886             :     end if
     887             : 
     888             :     ! Pack if mask present
     889          21 :     if (present(mask)) then
     890           0 :        nin = count(mask)
     891           0 :        allocate(x(nin))
     892           0 :        x = pack(ix, mask)
     893             :     else
     894          21 :        nin = size(ix,1)
     895          63 :        allocate(x(nin))
     896         462 :        x = ix
     897             :     endif
     898          63 :     allocate(z(nin))
     899             : 
     900             :     ! output size
     901          21 :     if (present(xout)) then
     902          20 :        nout = size(xout,1)
     903          60 :        allocate(xxout(nout))
     904        2060 :        xxout = xout
     905             :     else
     906           1 :        nout = nin
     907           2 :        allocate(xxout(nout))
     908          22 :        xxout = x
     909             :     end if
     910             :     ! allocate output
     911          63 :     allocate(kernel_density_1d_dp(nout))
     912             : 
     913             :     ! determine h
     914          21 :     if (present(h)) then
     915          21 :        hh = h
     916             :     else
     917           0 :        if (present(silverman)) then
     918           0 :           hh = kernel_density_h(x, silverman=silverman)
     919             :        else
     920           0 :           hh = kernel_density_h(x, silverman=.true.)
     921             :        end if
     922             :     end if
     923             : 
     924          21 :     multiplier = 1.0_dp/(real(nin,dp)*hh)
     925          21 :     if (multiplier <= 1.0_dp) then
     926          21 :        thresh = tiny(1.0_dp)/multiplier
     927             :     else
     928             :        thresh = 0.0_dp
     929             :     endif
     930             :     ! loop through all regression points
     931        2061 :     do ii=1, nout
     932             :        ! scaled difference from regression point
     933       42840 :        z(:) = (x(:) - xxout(ii)) / hh
     934             :        ! nadaraya-watson estimator of gaussian multivariate kernel
     935        2040 :        kernel_density_1d_dp(ii) = nadaraya_watson(z)
     936        2061 :        if (kernel_density_1d_dp(ii) .gt. thresh) kernel_density_1d_dp(ii) = multiplier * kernel_density_1d_dp(ii)
     937             :     end do
     938             : 
     939             :     ! check whether output has to be unpacked
     940          21 :     if (present(mask) .and. (.not. present(xout))) then
     941           0 :        deallocate(x)
     942           0 :        nin = size(ix,1)
     943           0 :        allocate(x(nin))
     944           0 :        x = unpack(kernel_density_1d_dp, mask, nodata)
     945           0 :        deallocate(kernel_density_1d_dp)
     946           0 :        allocate(kernel_density_1d_dp(nin))
     947           0 :        kernel_density_1d_dp = x
     948             :     end if
     949             : 
     950             :     ! clean up
     951          21 :     deallocate(xxout)
     952          21 :     deallocate(x)
     953          21 :     deallocate(z)
     954             : 
     955          21 :   end function kernel_density_1d_dp
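A sketch of the density estimator with a fixed bandwidth and user-chosen output points (all values below are hypothetical):

program sketch_density
  use mo_kind,   only: dp
  use mo_kernel, only: kernel_density
  implicit none
  real(dp), dimension(6) :: x    = (/ -1.2_dp, -0.4_dp, 0.0_dp, 0.3_dp, 0.9_dp, 1.5_dp /)
  real(dp), dimension(3) :: xout = (/ -1.0_dp,  0.0_dp, 1.0_dp /)
  real(dp), dimension(:), allocatable :: pdf
  ! Gaussian-kernel PDF estimate at xout; since h is given, no bandwidth estimation takes place
  pdf = kernel_density(x, h=0.5_dp, xout=xout)
  print *, pdf
end program sketch_density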
     956             : 
     957          21 :   function kernel_density_1d_sp(ix, h, silverman, xout, mask, nodata)
     958             : 
     959             :     implicit none
     960             : 
     961             :     real(sp), dimension(:),                       intent(in) :: ix
     962             :     real(sp),                           optional, intent(in) :: h
     963             :     logical,                            optional, intent(in) :: silverman
     964             :     real(sp), dimension(:),             optional, intent(in) :: xout
     965             :     logical,  dimension(:),             optional, intent(in) :: mask
     966             :     real(sp),                           optional, intent(in) :: nodata
     967             :     real(sp), dimension(:), allocatable                      :: kernel_density_1d_sp
     968             : 
     969             :     ! local variables
     970             :     integer(i4)                         :: nin, nout
     971             :     integer(i4)                         :: ii
     972           0 :     real(sp)                            :: hh
     973           0 :     real(sp), dimension(:), allocatable :: xxout
     974           0 :     real(sp), dimension(:), allocatable :: z
     975           0 :     real(sp)                            :: multiplier
     976           0 :     real(sp)                            :: thresh
     977           0 :     real(sp), dimension(:),        allocatable :: x
     978             : 
     979             :     ! consistency check - mask needs either nodata or xout
     980           0 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
     981           0 :        stop 'kernel_density_1d_sp: missing nodata value or xout with present(mask)'
     982             :     end if
     983             : 
     984             :     ! Pack if mask present
     985           0 :     if (present(mask)) then
     986           0 :        nin = count(mask)
     987           0 :        allocate(x(nin))
     988           0 :        x = pack(ix, mask)
     989             :     else
     990           0 :        nin = size(ix,1)
     991           0 :        allocate(x(nin))
     992           0 :        x = ix
     993             :     endif
     994           0 :     allocate(z(nin))
     995             : 
     996             :     ! output size
     997           0 :     if (present(xout)) then
     998           0 :        nout = size(xout,1)
     999           0 :        allocate(xxout(nout))
    1000           0 :        xxout = xout
    1001             :     else
    1002           0 :        nout = nin
    1003           0 :        allocate(xxout(nout))
    1004           0 :        xxout = x
    1005             :     end if
    1006             :     ! allocate output
    1007           0 :     allocate(kernel_density_1d_sp(nout))
    1008             : 
    1009             :     ! determine h
    1010           0 :     if (present(h)) then
    1011           0 :        hh = h
    1012             :     else
    1013           0 :        if (present(silverman)) then
    1014           0 :           hh = kernel_density_h(x, silverman=silverman)
    1015             :        else
    1016           0 :           hh = kernel_density_h(x, silverman=.true.)
    1017             :        end if
    1018             :     end if
    1019             : 
    1020           0 :     multiplier = 1.0_sp/(real(nin,sp)*hh)
    1021           0 :     if (multiplier <= 1.0_sp) then
    1022           0 :        thresh = tiny(1.0_sp)/multiplier
    1023             :     else
    1024             :        thresh = 0.0_sp
    1025             :     endif
    1026             :     ! loop through all regression points
    1027           0 :     do ii=1, nout
    1028             :        ! scaled difference from regression point
    1029           0 :        z(:) = (x(:) - xxout(ii)) / hh
    1030             :        ! nadaraya-watson estimator of gaussian multivariate kernel
    1031           0 :        kernel_density_1d_sp(ii) = nadaraya_watson(z)
    1032           0 :        if (kernel_density_1d_sp(ii) .gt. thresh) kernel_density_1d_sp(ii) = multiplier * kernel_density_1d_sp(ii)
    1033             :     end do
    1034             : 
    1035             :     ! check whether output has to be unpacked
    1036           0 :     if (present(mask) .and. (.not. present(xout))) then
    1037           0 :        deallocate(x)
    1038           0 :        nin = size(ix,1)
    1039           0 :        allocate(x(nin))
    1040           0 :        x = unpack(kernel_density_1d_sp, mask, nodata)
    1041           0 :        deallocate(kernel_density_1d_sp)
    1042           0 :        allocate(kernel_density_1d_sp(nin))
    1043           0 :        kernel_density_1d_sp = x
    1044             :     end if
    1045             : 
    1046             :     ! clean up
    1047           0 :     deallocate(xxout)
    1048           0 :     deallocate(x)
    1049           0 :     deallocate(z)
    1050             : 
    1051           0 :   end function kernel_density_1d_sp
    1052             : 
    1053             : 
    1054             :   ! ------------------------------------------------------------------------------------------------
    1055             : 
    1056           6 :   function kernel_density_h_1d_dp(ix, silverman, mask)
    1057             : 
    1058             :     implicit none
    1059             : 
    1060             :     real(dp), dimension(:),           intent(in) :: ix
    1061             :     logical,                optional, intent(in) :: silverman
    1062             :     logical,  dimension(:), optional, intent(in) :: mask
    1063             :     real(dp)                                     :: kernel_density_h_1d_dp
    1064             : 
    1065             :     ! local variables
    1066             :     integer(i4)              :: nin
    1067           3 :     real(dp)                 :: nn
    1068           3 :     real(dp)                 :: h, hmin, fmin ! fmin set but never referenced
    1069           9 :     real(dp), dimension(2)   :: bounds
    1070             :     real(dp), parameter      :: pre_h = 1.05922384104881_dp
    1071           3 :     real(dp), dimension(:), allocatable :: x
    1072             : 
    1073           3 :     if (present(mask)) then
    1074           0 :        nin = count(mask)
    1075           0 :        allocate(x(nin))
    1076           0 :        x = pack(ix, mask)
    1077             :     else
    1078           3 :        nin = size(ix,1)
    1079           9 :        allocate(x(nin))
    1080          66 :        x = ix
    1081             :     endif
    1082           3 :     nn = real(nin,dp)
    1083             : 
    1084             :     ! Default: Silverman's rule of thumb by
    1085             :     ! Silvermann (1986), Scott (1992), Bowman and Azzalini (1997)
    1086             :     !h(1) = (4._dp/3._dp/real(nn,dp))**(0.2_dp) * stddev_x
    1087           3 :     h = pre_h/(nn**0.2_dp) * stddev(x(:))
    1088             : 
    1089           3 :     if (present(silverman)) then
    1090           3 :        if (.not. silverman) then
    1091         129 :           bounds(1) = max(0.2_dp * h, (maxval(x)-minval(x))/nn)
    1092           3 :           bounds(2) = 5.0_dp * h
    1093           3 :           call allocate_globals(x)
    1094           3 :           fmin = golden(bounds(1),h,bounds(2),cross_valid_density_1d_dp, 0.0001_dp,hmin)
    1095           3 :           fmin = fmin * 1.0_dp  ! just to avoid warnings of "Variable FMIN set but never referenced"
    1096           3 :           h = hmin
    1097           3 :           call deallocate_globals()
    1098             :        end if
    1099             :     end if
    1100             : 
    1101           3 :     kernel_density_h_1d_dp = h
    1102             : 
    1103           3 :     deallocate(x)
    1104             : 
    1105           0 :   end function kernel_density_h_1d_dp
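A sketch contrasting the two bandwidth choices implemented above (sample values are made up; silverman=.false. selects the cross-validation path with the golden-section search):

program sketch_bandwidth
  use mo_kind,   only: dp
  use mo_kernel, only: kernel_density_h
  implicit none
  real(dp), dimension(10) :: x = (/ 0.02_dp, 0.15_dp, 0.23_dp, 0.31_dp, 0.45_dp, &
                                    0.55_dp, 0.61_dp, 0.77_dp, 0.89_dp, 0.95_dp /)
  real(dp) :: h_rot, h_cv
  h_rot = kernel_density_h(x, silverman=.true.)    ! Silverman's rule of thumb
  h_cv  = kernel_density_h(x, silverman=.false.)   ! cross-validation (golden-section search)
  print *, 'rule of thumb:', h_rot, '  cross-validation:', h_cv
end program sketch_bandwidth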
    1106             : 
    1107           0 :   function kernel_density_h_1d_sp(ix, silverman, mask)
    1108             : 
    1109             :     implicit none
    1110             : 
    1111             :     real(sp), dimension(:),           intent(in) :: ix
    1112             :     logical,                optional, intent(in) :: silverman
    1113             :     logical,  dimension(:), optional, intent(in) :: mask
    1114             :     real(sp)                                     :: kernel_density_h_1d_sp
    1115             : 
    1116             :     ! local variables
    1117             :     integer(i4)              :: nin
    1118           0 :     real(sp)                 :: nn
    1119           0 :     real(sp)                 :: h, hmin, fmin ! fmin set but never referenced
    1120           0 :     real(sp), dimension(2)   :: bounds
    1121             :     real(sp), parameter      :: pre_h = 1.05922384104881_sp
    1122           0 :     real(sp), dimension(:), allocatable :: x
    1123             : 
    1124           0 :     if (present(mask)) then
    1125           0 :        nin = count(mask)
    1126           0 :        allocate(x(nin))
    1127           0 :        x = pack(ix, mask)
    1128             :     else
    1129           0 :        nin = size(ix,1)
    1130           0 :        allocate(x(nin))
    1131           0 :        x = ix
    1132             :     endif
    1133           0 :     nn = real(nin,sp)
    1134             : 
    1135             :     ! Default: Silverman's rule of thumb by
     1136             :     ! Silverman (1986), Scott (1992), Bowman and Azzalini (1997)
    1137             :     !h(1) = (4._sp/3._sp/real(nn,sp))**(0.2_sp) * stddev_x
    1138           0 :     h = pre_h/(nn**0.2_sp) * stddev(x(:))
    1139             : 
    1140           0 :     if (present(silverman)) then
    1141           0 :        if (.not. silverman) then
    1142           0 :           bounds(1) = max(0.2_sp * h, (maxval(x)-minval(x))/nn)
    1143           0 :           bounds(2) = 5.0_sp * h
    1144           0 :           call allocate_globals(x)
    1145           0 :           fmin = golden(bounds(1),h,bounds(2),cross_valid_density_1d_sp, 0.0001_sp,hmin)
    1146           0 :           fmin = fmin * 1.0_sp  ! just to avoid warnings of "Variable FMIN set but never referenced"
    1147           0 :           h = hmin
    1148           0 :           call deallocate_globals()
    1149             :        end if
    1150             :     end if
    1151             : 
    1152           0 :     kernel_density_h_1d_sp = h
    1153             : 
    1154           0 :     deallocate(x)
    1155             : 
    1156           3 :   end function kernel_density_h_1d_sp
    1157             : 
    1158             : 
    1159             :   ! ------------------------------------------------------------------------------------------------
    1160             : 
    1161           0 :   function kernel_regression_1d_dp(ix, iy, h, silverman, xout, mask, nodata)
    1162             : 
    1163             :     implicit none
    1164             : 
    1165             :     real(dp), dimension(:),                       intent(in) :: ix
    1166             :     real(dp), dimension(:),                       intent(in) :: iy
    1167             :     real(dp),                           optional, intent(in) :: h
    1168             :     logical,                            optional, intent(in) :: silverman
    1169             :     real(dp), dimension(:),             optional, intent(in) :: xout
    1170             :     logical,  dimension(:),             optional, intent(in) :: mask
    1171             :     real(dp),                           optional, intent(in) :: nodata
    1172             :     real(dp), dimension(:), allocatable                      :: kernel_regression_1d_dp
    1173             : 
    1174             :     ! local variables
    1175             :     integer(i4)                         :: nin, nout
    1176             :     integer(i4)                         :: ii
    1177           0 :     real(dp)                            :: hh
    1178           0 :     real(dp), dimension(:), allocatable :: xxout
    1179           0 :     real(dp), dimension(:), allocatable :: z
    1180           0 :     real(dp), dimension(:), allocatable :: x
    1181           0 :     real(dp), dimension(:), allocatable :: y
    1182             : 
    1183             :     ! consistency check - mask needs either nodata or xout
    1184           0 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
    1185           0 :        stop 'kernel_regression_1d_dp: missing nodata value or xout with present(mask)'
    1186             :     end if
    1187             : 
    1188           0 :     nin   = size(ix,1)
    1189             :     ! consistency checks of inputs
    1190           0 :     if (size(iy,1) .ne. nin) stop 'kernel_regression_1d_dp: size(x) /= size(y)'
    1191             : 
    1192             :     ! Pack if mask present
    1193           0 :     if (present(mask)) then
    1194           0 :        nin = count(mask)
    1195           0 :        allocate(x(nin))
    1196           0 :        allocate(y(nin))
    1197           0 :        x = pack(ix, mask)
    1198           0 :        y = pack(iy, mask)
    1199             :     else
    1200           0 :        nin = size(ix,1)
    1201           0 :        allocate(x(nin))
    1202           0 :        allocate(y(nin))
    1203           0 :        x = ix
    1204           0 :        y = iy
    1205             :     endif
    1206           0 :     allocate(z(nin))
    1207             : 
    1208             :     ! determine h
    1209           0 :     if (present(h)) then
    1210           0 :        hh = h
    1211             :     else
    1212           0 :        if (present(silverman)) then
    1213           0 :           hh = kernel_regression_h(x, y, silverman=silverman)
    1214             :        else
    1215           0 :           hh = kernel_regression_h(x, y, silverman=.true.)
    1216             :        end if
    1217             :     end if
    1218             : 
    1219             :     ! calc regression
    1220           0 :     if (present(xout)) then
    1221           0 :        nout = size(xout,1)
    1222           0 :        allocate(xxout(nout))
    1223           0 :        xxout = xout
    1224             :     else
    1225           0 :        nout = nin
    1226           0 :        allocate(xxout(nout))
    1227           0 :        xxout = x
    1228             :     end if
    1229             :     ! allocate output
    1230           0 :     allocate(kernel_regression_1d_dp(nout))
    1231             : 
    1232             :     ! loop through all regression points
    1233           0 :     do ii = 1, nout
    1234             :        ! scaled difference from regression point
    1235           0 :        z(:) = (x(:) - xxout(ii)) / hh
    1236             :        ! nadaraya-watson estimator of gaussian multivariate kernel
    1237           0 :        kernel_regression_1d_dp(ii) = nadaraya_watson(z, y)
    1238             :     end do
    1239             : 
    1240             :     ! check whether output has to be unpacked
    1241           0 :     if (present(mask) .and. (.not. present(xout))) then
    1242           0 :        deallocate(x)
    1243           0 :        nin = size(ix,1)
    1244           0 :        allocate(x(nin))
    1245           0 :        x = unpack(kernel_regression_1d_dp, mask, nodata)
    1246           0 :        deallocate(kernel_regression_1d_dp)
    1247           0 :        allocate(kernel_regression_1d_dp(nin))
    1248           0 :        kernel_regression_1d_dp = x
    1249             :     end if
    1250             : 
    1251             :     ! clean up
    1252           0 :     deallocate(xxout)
    1253           0 :     deallocate(x)
    1254           0 :     deallocate(y)
    1255           0 :     deallocate(z)
    1256             : 
    1257           0 :   end function kernel_regression_1d_dp
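A sketch of the one-dimensional regression call (synthetic data; names such as yhat are placeholders):

program sketch_regression_1d
  use mo_kind,   only: i4, dp
  use mo_kernel, only: kernel_regression
  implicit none
  integer(i4), parameter :: n = 25
  real(dp), dimension(n) :: x, y
  real(dp), dimension(5) :: xout = (/ 0.1_dp, 0.3_dp, 0.5_dp, 0.7_dp, 0.9_dp /)
  real(dp), dimension(:), allocatable :: yhat
  integer(i4) :: i
  x = (/ (real(i,dp)/real(n,dp), i=1,n) /)   ! regular grid on (0,1]
  y = x**2                                   ! smooth test response
  ! with neither h nor silverman given, the bandwidth defaults to Silverman's rule of thumb
  yhat = kernel_regression(x, y, xout=xout)
  print *, yhat
end program sketch_regression_1d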
    1258             : 
    1259           0 :   function kernel_regression_1d_sp(ix, iy, h, silverman, xout, mask, nodata)
    1260             : 
    1261             :     implicit none
    1262             : 
    1263             :     real(sp), dimension(:),                       intent(in) :: ix
    1264             :     real(sp), dimension(:),                       intent(in) :: iy
    1265             :     real(sp),                           optional, intent(in) :: h
    1266             :     logical,                            optional, intent(in) :: silverman
    1267             :     real(sp), dimension(:),             optional, intent(in) :: xout
    1268             :     logical,  dimension(:),             optional, intent(in) :: mask
    1269             :     real(sp),                           optional, intent(in) :: nodata
    1270             :     real(sp), dimension(:), allocatable                      :: kernel_regression_1d_sp
    1271             : 
    1272             :     ! local variables
    1273             :     integer(i4)                         :: nin, nout
    1274             :     integer(i4)                         :: ii
    1275           0 :     real(sp)                            :: hh
    1276           0 :     real(sp), dimension(:), allocatable :: xxout
    1277           0 :     real(sp), dimension(:), allocatable :: z
    1278           0 :     real(sp), dimension(:), allocatable :: x
    1279           0 :     real(sp), dimension(:), allocatable :: y
    1280             : 
    1281             :     ! consistency check - mask needs either nodata or xout
    1282           0 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
    1283           0 :        stop 'kernel_regression_1d_sp: missing nodata value or xout with present(mask)'
    1284             :     end if
    1285             : 
    1286           0 :     nin   = size(ix,1)
    1287             :     ! consistency checks of inputs
    1288           0 :     if (size(iy,1) .ne. nin) stop 'kernel_regression_1d_sp: size(x) /= size(y)'
    1289             : 
    1290             :     ! Pack if mask present
    1291           0 :     if (present(mask)) then
    1292           0 :        nin = count(mask)
    1293           0 :        allocate(x(nin))
    1294           0 :        allocate(y(nin))
    1295           0 :        x = pack(ix, mask)
    1296           0 :        y = pack(iy, mask)
    1297             :     else
    1298           0 :        nin = size(ix,1)
    1299           0 :        allocate(x(nin))
    1300           0 :        allocate(y(nin))
    1301           0 :        x = ix
    1302           0 :        y = iy
    1303             :     endif
    1304           0 :     allocate(z(nin))
    1305             : 
    1306             :     ! determine h
    1307           0 :     if (present(h)) then
    1308           0 :        hh = h
    1309             :     else
    1310           0 :        if (present(silverman)) then
    1311           0 :           hh = kernel_regression_h(x, y, silverman=silverman)
    1312             :        else
    1313           0 :           hh = kernel_regression_h(x, y, silverman=.true.)
    1314             :        end if
    1315             :     end if
    1316             : 
    1317             :     ! calc regression
    1318           0 :     if (present(xout)) then
    1319           0 :        nout = size(xout,1)
    1320           0 :        allocate(xxout(nout))
    1321           0 :        xxout = xout
    1322             :     else
    1323           0 :        nout = nin
    1324           0 :        allocate(xxout(nout))
    1325           0 :        xxout = x
    1326             :     end if
    1327             :     ! allocate output
    1328           0 :     allocate(kernel_regression_1d_sp(nout))
    1329             : 
    1330             :     ! loop through all regression points
    1331           0 :     do ii = 1, nout
    1332             :        ! scaled difference from regression point
    1333           0 :        z(:) = (x(:) - xxout(ii)) / hh
    1334             :        ! nadaraya-watson estimator of gaussian multivariate kernel
    1335           0 :        kernel_regression_1d_sp(ii) = nadaraya_watson(z, y)
    1336             :     end do
    1337             : 
    1338             :     ! check whether output has to be unpacked
    1339           0 :     if (present(mask) .and. (.not. present(xout))) then
    1340           0 :        deallocate(x)
    1341           0 :        nin = size(ix,1)
    1342           0 :        allocate(x(nin))
    1343           0 :        x = unpack(kernel_regression_1d_sp, mask, nodata)
    1344           0 :        deallocate(kernel_regression_1d_sp)
    1345           0 :        allocate(kernel_regression_1d_sp(nin))
    1346           0 :        kernel_regression_1d_sp = x
    1347             :     end if
    1348             : 
    1349             :     ! clean up
    1350           0 :     deallocate(xxout)
    1351           0 :     deallocate(x)
    1352           0 :     deallocate(y)
    1353           0 :     deallocate(z)
    1354             : 
    1355           0 :   end function kernel_regression_1d_sp
    1356             : 
    1357           3 :   function kernel_regression_2d_dp(ix, iy, h, silverman, xout, mask, nodata)
    1358             : 
    1359             :     implicit none
    1360             : 
    1361             :     real(dp), dimension(:,:),                       intent(in) :: ix
    1362             :     real(dp), dimension(:),                         intent(in) :: iy
    1363             :     real(dp), dimension(:),               optional, intent(in) :: h
    1364             :     logical,                              optional, intent(in) :: silverman
    1365             :     real(dp), dimension(:,:),             optional, intent(in) :: xout
    1366             :     logical,  dimension(:),               optional, intent(in) :: mask
    1367             :     real(dp),                             optional, intent(in) :: nodata
    1368             :     real(dp), dimension(:),   allocatable                      :: kernel_regression_2d_dp
    1369             : 
    1370             :     ! local variables
    1371             :     integer(i4)                           :: dims
    1372             :     integer(i4)                           :: nin, nout
    1373             :     integer(i4)                           :: ii, jj
    1374           4 :     real(dp), dimension(size(ix,2))       :: hh
    1375           1 :     real(dp), dimension(:,:), allocatable :: xxout
    1376           1 :     real(dp), dimension(:,:), allocatable :: z
    1377           1 :     real(dp), dimension(:,:), allocatable :: x
    1378           1 :     real(dp), dimension(:),   allocatable :: y
    1379             : 
    1380             :     ! consistency check - mask needs either nodata or xout
    1381           1 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
     1382           0 :        stop 'kernel_regression_2d_dp: missing nodata value or xout with present(mask)'
    1383             :     end if
    1384             : 
    1385           1 :     nin  = size(ix,1)
    1386           1 :     dims = size(ix,2)
    1387             :     ! consistency checks of inputs
    1388           1 :     if (size(iy) .ne. nin) stop 'kernel_regression_2d_dp: size(y) /= size(x,1)'
    1389           1 :     if (present(h)) then
    1390           0 :        if (size(h) .ne. dims) stop 'kernel_regression_2d_dp: size(h) /= size(x,2)'
    1391             :     end if
    1392           1 :     if (present(xout)) then
    1393           0 :        if (size(xout,2) .ne. dims) stop 'kernel_regression_2d_dp: size(xout) /= size(x,2)'
    1394             :     end if
    1395             : 
    1396             :     ! Pack if mask present
    1397           1 :     if (present(mask)) then
    1398           0 :        nin = count(mask)
    1399           0 :        allocate(x(nin,dims))
    1400           0 :        allocate(y(nin))
    1401           0 :        forall(ii=1:dims) x(:,ii) = pack(ix(:,ii), mask)
    1402           0 :        y = pack(iy, mask)
    1403             :     else
    1404           1 :        nin = size(ix,1)
    1405           4 :        allocate(x(nin,dims))
    1406           3 :        allocate(y(nin))
    1407          24 :        x = ix
    1408          12 :        y = iy
    1409             :     endif
    1410           4 :     allocate(z(nin,dims))
    1411             : 
    1412             :     ! determine h
    1413           1 :     if (present(h)) then
    1414           0 :        hh = h
    1415             :     else
    1416           1 :        if (present(silverman)) then
    1417           1 :           hh = kernel_regression_h(x, y, silverman=silverman)
    1418             :        else
    1419           0 :           hh = kernel_regression_h(x, y, silverman=.true.)
    1420             :        end if
    1421             :     end if
    1422             : 
    1423             :     ! calc regression
    1424           1 :     if (present(xout)) then
    1425           0 :        nout = size(xout,1)
    1426           0 :        allocate(xxout(nout,dims))
    1427           0 :        xxout = xout
    1428             :     else
    1429           1 :        nout = nin
    1430           3 :        allocate(xxout(nout,dims))
    1431          24 :        xxout = x
    1432             :     end if
    1433             :     ! allocate output
    1434           3 :     allocate(kernel_regression_2d_dp(nout))
    1435             : 
    1436             :     ! loop through all regression points
    1437          11 :     do ii = 1, nout
    1438         230 :        forall(jj=1:dims) z(:,jj) = (x(:,jj) - xxout(ii,jj)) / hh(jj)
    1439             :        ! nadaraya-watson estimator of gaussian multivariate kernel
    1440          11 :        kernel_regression_2d_dp(ii) = nadaraya_watson(z, y)
    1441             :     end do
    1442             : 
    1443             :     ! check whether output has to be unpacked
    1444           1 :     if (present(mask) .and. (.not. present(xout))) then
    1445           0 :        deallocate(y)
    1446           0 :        nin = size(iy,1)
    1447           0 :        allocate(y(nin))
    1448           0 :        y = unpack(kernel_regression_2d_dp, mask, nodata)
    1449           0 :        deallocate(kernel_regression_2d_dp)
    1450           0 :        allocate(kernel_regression_2d_dp(nin))
    1451           0 :        kernel_regression_2d_dp = y
    1452             :     end if
    1453             : 
    1454             :     ! clean up
    1455           1 :     deallocate(xxout)
    1456           1 :     deallocate(x)
    1457           1 :     deallocate(y)
    1458           1 :     deallocate(z)
    1459             : 
    1460           1 :   end function kernel_regression_2d_dp
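A sketch of the multidimensional case; the bandwidth is a vector with one entry per column of x (random data, purely illustrative):

program sketch_regression_nd
  use mo_kind,   only: i4, dp
  use mo_kernel, only: kernel_regression, kernel_regression_h
  implicit none
  integer(i4), parameter :: n = 40
  real(dp), dimension(n,2) :: x
  real(dp), dimension(n)   :: y
  real(dp), dimension(2)   :: h
  real(dp), dimension(:), allocatable :: yhat
  call random_number(x)                                ! two predictors in [0,1)
  y = x(:,1) + 2.0_dp*x(:,2)                           ! noise-free linear response
  h    = kernel_regression_h(x, y, silverman=.true.)   ! one bandwidth per dimension
  yhat = kernel_regression(x, y, h=h)                  ! regression estimate at the data points
  print *, maxval(abs(yhat - y))
end program sketch_regression_nd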
    1461             : 
    1462           1 :   function kernel_regression_2d_sp(ix, iy, h, silverman, xout, mask, nodata)
    1463             : 
    1464             :     implicit none
    1465             : 
    1466             :     real(sp), dimension(:,:),                       intent(in) :: ix
    1467             :     real(sp), dimension(:),                         intent(in) :: iy
    1468             :     real(sp), dimension(:),               optional, intent(in) :: h
    1469             :     logical,                              optional, intent(in) :: silverman
    1470             :     real(sp), dimension(:,:),             optional, intent(in) :: xout
    1471             :     logical,  dimension(:),               optional, intent(in) :: mask
    1472             :     real(sp),                             optional, intent(in) :: nodata
    1473             :     real(sp), dimension(:),   allocatable                      :: kernel_regression_2d_sp
    1474             : 
    1475             :     ! local variables
    1476             :     integer(i4)                           :: dims
    1477             :     integer(i4)                           :: nin, nout
    1478             :     integer(i4)                           :: ii, jj
    1479           0 :     real(sp), dimension(size(ix,2))       :: hh
    1480           0 :     real(sp), dimension(:,:), allocatable :: xxout
    1481           0 :     real(sp), dimension(:,:), allocatable :: z
    1482           0 :     real(sp), dimension(:,:), allocatable :: x
    1483           0 :     real(sp), dimension(:),   allocatable :: y
    1484             : 
    1485             :     ! consistency check - mask needs either nodata or xout
    1486           0 :     if (present(mask) .and. (.not. present(xout)) .and. (.not. present(nodata)) ) then
    1487           0 :        stop 'kernel_regression_1d_sp: missing nodata value or xout with present(mask)'
    1488             :     end if
    1489             : 
    1490           0 :     nin  = size(ix,1)
    1491           0 :     dims = size(ix,2)
    1492             :     ! consistency checks of inputs
    1493           0 :     if (size(iy) .ne. nin) stop 'kernel_regression_2d_sp: size(y) /= size(x,1)'
    1494           0 :     if (present(h)) then
    1495           0 :        if (size(h) .ne. dims) stop 'kernel_regression_2d_sp: size(h) /= size(x,2)'
    1496             :     end if
    1497           0 :     if (present(xout)) then
    1498           0 :        if (size(xout,2) .ne. dims) stop 'kernel_regression_2d_sp: size(xout) /= size(x,2)'
    1499             :     end if
    1500             : 
    1501             :     ! Pack if mask present
    1502           0 :     if (present(mask)) then
    1503           0 :        nin = count(mask)
    1504           0 :        allocate(x(nin,dims))
    1505           0 :        allocate(y(nin))
    1506           0 :        forall(ii=1:dims) x(:,ii) = pack(ix(:,ii), mask)
    1507           0 :        y = pack(iy, mask)
    1508             :     else
    1509           0 :        nin = size(ix,1)
    1510           0 :        allocate(x(nin,dims))
    1511           0 :        allocate(y(nin))
    1512           0 :        x = ix
    1513           0 :        y = iy
    1514             :     endif
    1515           0 :     allocate(z(nin,dims))
    1516             : 
    1517             :     ! determine h
    1518           0 :     if (present(h)) then
    1519           0 :        hh = h
    1520             :     else
    1521           0 :        if (present(silverman)) then
    1522           0 :           hh = kernel_regression_h(x, y, silverman=silverman)
    1523             :        else
    1524           0 :           hh = kernel_regression_h(x, y, silverman=.true.)
    1525             :        end if
    1526             :     end if
    1527             : 
    1528             :     ! calc regression
    1529           0 :     if (present(xout)) then
    1530           0 :        nout = size(xout,1)
    1531           0 :        allocate(xxout(nout,dims))
    1532           0 :        xxout = xout
    1533             :     else
    1534           0 :        nout = nin
    1535           0 :        allocate(xxout(nout,dims))
    1536           0 :        xxout = x
    1537             :     end if
    1538             :     ! allocate output
    1539           0 :     allocate(kernel_regression_2d_sp(nout))
    1540             : 
    1541             :     ! loop through all regression points
    1542           0 :     do ii = 1, nout
    1543           0 :        forall(jj=1:dims) z(:,jj) = (x(:,jj) - xxout(ii,jj)) / hh(jj)
    1544             :        ! nadaraya-watson estimator of gaussian multivariate kernel
    1545           0 :        kernel_regression_2d_sp(ii) = nadaraya_watson(z, y)
    1546             :     end do
    1547             : 
    1548             :     ! check whether output has to be unpacked
    1549           0 :     if (present(mask) .and. (.not. present(xout))) then
    1550           0 :        deallocate(y)
    1551           0 :        nin = size(iy,1)
    1552           0 :        allocate(y(nin))
    1553           0 :        y = unpack(kernel_regression_2d_sp, mask, nodata)
    1554           0 :        deallocate(kernel_regression_2d_sp)
    1555           0 :        allocate(kernel_regression_2d_sp(nin))
    1556           0 :        kernel_regression_2d_sp = y
    1557             :     end if
    1558             : 
    1559             :     ! clean up
    1560           0 :     deallocate(xxout)
    1561           0 :     deallocate(x)
    1562           0 :     deallocate(y)
    1563           0 :     deallocate(z)
    1564             : 
    1565           0 :   end function kernel_regression_2d_sp
    1566             : 
    1567             : 
    1568             :   ! ------------------------------------------------------------------------------------------------
    1569             : 
    1570           0 :   function kernel_regression_h_1d_dp(ix, iy, silverman, mask)
    1571             : 
    1572           0 :     use mo_nelmin,    only: nelminrange      ! nd optimization
    1573             : 
    1574             :     implicit none
    1575             : 
    1576             :     real(dp), dimension(:),           intent(in) :: ix
    1577             :     real(dp), dimension(:),           intent(in) :: iy
    1578             :     logical,                optional, intent(in) :: silverman
    1579             :     logical,  dimension(:), optional, intent(in) :: mask
    1580             :     real(dp)                                     :: kernel_regression_h_1d_dp
    1581             : 
    1582             :     ! local variables
    1583             :     integer(i4)              :: nin
    1584           0 :     real(dp)                 :: nn
    1585           0 :     real(dp), dimension(1)   :: h
    1586           0 :     real(dp), dimension(1,2) :: bounds
    1587             :     real(dp), parameter      :: pre_h = 1.05922384104881_dp
    1588           0 :     real(dp), dimension(:), allocatable :: x, y
    1589             : 
    1590           0 :     if (present(mask)) then
    1591           0 :        nin = count(mask)
    1592           0 :        allocate(x(nin))
    1593           0 :        allocate(y(nin))
    1594           0 :        x = pack(ix, mask)
    1595           0 :        y = pack(iy, mask)
    1596             :     else
    1597           0 :        nin = size(ix,1)
    1598           0 :        allocate(x(nin))
    1599           0 :        allocate(y(nin))
    1600           0 :        x = ix
    1601           0 :        y = iy
    1602             :     endif
    1603           0 :     nn = real(nin,dp)
    1604             : 
    1605             :     ! Silverman's rule of thumb by
     1606             :     ! Silverman (1986), Scott (1992), Bowman and Azzalini (1997)
    1607             :     !h(1) = (4._dp/3._dp/real(nn,dp))**(0.2_dp) * stddev_x
    1608           0 :     h(1) = pre_h/(nn**0.2_dp) * stddev(x(:))
    1609             : 
    1610           0 :     if (present(silverman)) then
    1611           0 :        if (.not. silverman) then
    1612           0 :           bounds(1,1) = max(0.2_dp * h(1), (maxval(x)-minval(x))/nn)
    1613           0 :           bounds(1,2) = 5.0_dp * h(1)
    1614           0 :           call allocate_globals(x,y)
    1615           0 :           h = nelminrange(cross_valid_regression_dp, h, bounds)
    1616           0 :           call deallocate_globals()
    1617             :        end if
    1618             :     end if
    1619             : 
    1620           0 :     kernel_regression_h_1d_dp = h(1)
    1621             : 
    1622           0 :     deallocate(x)
    1623           0 :     deallocate(y)
    1624             : 
    1625           0 :   end function kernel_regression_h_1d_dp
    1626             : 
    1627           0 :   function kernel_regression_h_1d_sp(ix, iy, silverman, mask)
    1628             : 
    1629           0 :     use mo_nelmin,    only: nelminrange      ! nd optimization
    1630             : 
    1631             :     implicit none
    1632             : 
    1633             :     real(sp), dimension(:),           intent(in) :: ix
    1634             :     real(sp), dimension(:),           intent(in) :: iy
    1635             :     logical,                optional, intent(in) :: silverman
    1636             :     logical,  dimension(:), optional, intent(in) :: mask
    1637             :     real(sp)                                     :: kernel_regression_h_1d_sp
    1638             : 
    1639             :     ! local variables
    1640             :     integer(i4)              :: nin
    1641           0 :     real(sp)                 :: nn
    1642           0 :     real(sp), dimension(1)   :: h
    1643           0 :     real(sp), dimension(1,2) :: bounds
    1644             :     real(sp), parameter      :: pre_h = 1.05922384104881_sp
    1645           0 :     real(sp), dimension(:), allocatable :: x, y
    1646             : 
    1647           0 :     if (present(mask)) then
    1648           0 :        nin = count(mask)
    1649           0 :        allocate(x(nin))
    1650           0 :        allocate(y(nin))
    1651           0 :        x = pack(ix, mask)
    1652           0 :        y = pack(iy, mask)
    1653             :     else
    1654           0 :        nin = size(ix,1)
    1655           0 :        allocate(x(nin))
    1656           0 :        allocate(y(nin))
    1657           0 :        x = ix
    1658           0 :        y = iy
    1659             :     endif
    1660           0 :     nn = real(nin,sp)
    1661             : 
    1662             :     ! Silverman's rule of thumb by
     1663             :     ! Silverman (1986), Scott (1992), Bowman and Azzalini (1997)
    1664             :     !h(1) = (4._sp/3._sp/real(nn,sp))**(0.2_sp) * stddev_x
    1665           0 :     h(1) = pre_h/(nn**0.2_sp) * stddev(x(:))
    1666             : 
    1667           0 :     if (present(silverman)) then
    1668           0 :        if (.not. silverman) then
    1669           0 :           bounds(1,1) = max(0.2_sp * h(1), (maxval(x)-minval(x))/nn)
    1670           0 :           bounds(1,2) = 5.0_sp * h(1)
    1671           0 :           call allocate_globals(x,y)
    1672           0 :           h = nelminrange(cross_valid_regression_sp, h, bounds)
    1673           0 :           call deallocate_globals()
    1674             :        end if
    1675             :     end if
    1676             : 
    1677           0 :     kernel_regression_h_1d_sp = h(1)
    1678             : 
    1679           0 :     deallocate(x)
    1680           0 :     deallocate(y)
    1681             : 
    1682           0 :   end function kernel_regression_h_1d_sp
    1683             : 
    1684           9 :   function kernel_regression_h_2d_dp(ix, iy, silverman, mask)
    1685             : 
    1686           0 :     use mo_nelmin,    only: nelminrange      ! nd optimization
    1687             : 
    1688             :     implicit none
    1689             : 
    1690             :     real(dp), dimension(:,:),                 intent(in) :: ix
    1691             :     real(dp), dimension(:),                   intent(in) :: iy
    1692             :     logical,                        optional, intent(in) :: silverman
    1693             :     logical, dimension(:),          optional, intent(in) :: mask
    1694             :     real(dp), dimension(size(ix,2))                       :: kernel_regression_h_2d_dp
    1695             : 
    1696             :     ! local variables
    1697             :     integer(i4)                      :: dims, nn, ii
    1698           9 :     real(dp), dimension(size(ix,2))   :: h
    1699          12 :     real(dp), dimension(size(ix,2))   :: stddev_x
    1700          24 :     real(dp), dimension(size(ix,2),2) :: bounds
    1701           3 :     real(dp), dimension(:,:), allocatable :: x
    1702           3 :     real(dp), dimension(:),   allocatable :: y
    1703             : 
    1704           3 :     dims = size(ix,2)
    1705           3 :     if (present(mask)) then
    1706           0 :        nn = count(mask)
    1707           0 :        allocate(x(nn,dims))
    1708           0 :        allocate(y(nn))
    1709           0 :        forall(ii=1:dims) x(:,ii) = pack(ix(:,ii), mask)
    1710           0 :        y = pack(iy, mask)
    1711             :     else
    1712           3 :        nn = size(ix,1)
    1713          12 :        allocate(x(nn,dims))
    1714           9 :        allocate(y(nn))
    1715          72 :        x = ix
    1716          36 :        y = iy
    1717             :     endif
    1718             :     ! Silverman's rule of thumb by
     1719             :     ! Silverman (1986), Scott (1992), Bowman and Azzalini (1997)
    1720           9 :     do ii=1,dims
    1721           9 :        stddev_x(ii) = stddev(x(:,ii))
    1722             :     end do
    1723           9 :     h(:) = (4.0_dp/real(dims+2,dp)/real(nn,dp))**(1.0_dp/real(dims+4,dp)) * stddev_x(:)
    1724             : 
    1725           3 :     if (present(silverman)) then
    1726           3 :        if (.not. silverman) then
    1727           6 :           bounds(:,1) = max(0.2_dp * h(:), (maxval(x,dim=1)-minval(x,dim=1))/real(nn,dp))
    1728           6 :           bounds(:,2) = 5.0_dp * h(:)
    1729           2 :           call allocate_globals(x,y)
    1730           6 :           h = nelminrange(cross_valid_regression_dp, h, bounds)
    1731           2 :           call deallocate_globals()
    1732             :        end if
    1733             :     end if
    1734             : 
    1735           9 :     kernel_regression_h_2d_dp = h
    1736             : 
    1737           3 :     deallocate(x)
    1738           3 :     deallocate(y)
    1739             : 
    1740           3 :   end function kernel_regression_h_2d_dp
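For reference, the multidimensional rule of thumb used above written out as a standalone expression, h_j = (4 / ((d+2)*n))**(1/(d+4)) * sigma_j; d, n and the standard deviations below are placeholders:

program sketch_silverman_nd
  use mo_kind, only: i4, dp
  implicit none
  integer(i4), parameter :: d = 2, n = 12
  real(dp), dimension(d) :: sigma = (/ 1.0_dp, 0.5_dp /)   ! per-dimension standard deviations (made up)
  real(dp), dimension(d) :: h
  ! same expression as h(:) in kernel_regression_h_2d_dp above
  h = (4.0_dp/real(d+2,dp)/real(n,dp))**(1.0_dp/real(d+4,dp)) * sigma
  print *, h
end program sketch_silverman_nd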
    1741             : 
    1742           0 :   function kernel_regression_h_2d_sp(ix, iy, silverman, mask)
    1743             : 
    1744           3 :     use mo_nelmin,    only: nelminrange      ! nd optimization
    1745             : 
    1746             :     implicit none
    1747             : 
    1748             :     real(sp), dimension(:,:),                 intent(in) :: ix
    1749             :     real(sp), dimension(:),                   intent(in) :: iy
    1750             :     logical,                        optional, intent(in) :: silverman
    1751             :     logical, dimension(:),          optional, intent(in) :: mask
    1752             :     real(sp), dimension(size(ix,2))                       :: kernel_regression_h_2d_sp
    1753             : 
    1754             :     ! local variables
    1755             :     integer(i4)                      :: dims, nn, ii
    1756           0 :     real(sp), dimension(size(ix,2))   :: h
    1757           0 :     real(sp), dimension(size(ix,2))   :: stddev_x
    1758           0 :     real(sp), dimension(size(ix,2),2) :: bounds
    1759           0 :     real(sp), dimension(:,:), allocatable :: x
    1760           0 :     real(sp), dimension(:),   allocatable :: y
    1761             : 
    1762           0 :     dims = size(ix,2)
    1763           0 :     if (present(mask)) then
    1764           0 :        nn = count(mask)
    1765           0 :        allocate(x(nn,dims))
    1766           0 :        allocate(y(nn))
    1767           0 :        forall(ii=1:dims) x(:,ii) = pack(ix(:,ii), mask)
    1768           0 :        y = pack(iy, mask)
    1769             :     else
    1770           0 :        nn = size(ix,1)
    1771           0 :        allocate(x(nn,dims))
    1772           0 :        allocate(y(nn))
    1773           0 :        x = ix
    1774           0 :        y = iy
    1775             :     endif
    1776             :     ! Silverman's rule of thumb by
     1777             :     ! Silverman (1986), Scott (1992), Bowman and Azzalini (1997)
    1778           0 :     do ii=1,dims
    1779           0 :        stddev_x(ii) = stddev(x(:,ii))
    1780             :     end do
    1781           0 :     h(:) = (4.0_sp/real(dims+2,sp)/real(nn,sp))**(1.0_sp/real(dims+4,sp)) * stddev_x(:)
    1782             : 
    1783           0 :     if (present(silverman)) then
    1784           0 :        if (.not. silverman) then
    1785           0 :           bounds(:,1) = max(0.2_sp * h(:), (maxval(x,dim=1)-minval(x,dim=1))/real(nn,sp))
    1786           0 :           bounds(:,2) = 5.0_sp * h(:)
    1787           0 :           call allocate_globals(x,y)
    1788           0 :           h = nelminrange(cross_valid_regression_sp, h, bounds)
    1789           0 :           call deallocate_globals()
    1790             :        end if
    1791             :     end if
    1792             : 
    1793           0 :     kernel_regression_h_2d_sp = h
    1794             : 
    1795           0 :     deallocate(x)
    1796           0 :     deallocate(y)
    1797             : 
    1798           0 :   end function kernel_regression_h_2d_sp
    1799             : 
    1800             : 
    1801             :   ! ------------------------------------------------------------------------------------------------
    1802             :   !
    1803             :   !                PRIVATE ROUTINES
    1804             :   !
    1805             :   ! ------------------------------------------------------------------------------------------------
    1806             : 
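                     :   ! The nadaraya_watson_* functions below evaluate Gaussian kernel weights
                     :   !    w_i = exp(-0.5*z_i**2) / sqrt(2*pi)
                     :   ! for the standardized distances z_i = (x_i - x)/h supplied by the caller;
                     :   ! the 2D versions use the product of the per-dimension weights.
                     :   ! If y is present, the Nadaraya-Watson regression estimate
                     :   !    m(x) = sum_i w_i*y_i / sum_i w_i
                     :   ! is returned; otherwise the raw kernel sum sum_i w_i is returned, which
                     :   ! callers such as cross_valid_density_1d_dp scale by 1/(n*h) to obtain a
                     :   ! density estimate.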
    1807      270720 :   function nadaraya_watson_1d_dp(z, y, mask, valid)
    1808             : 
    1809           0 :     use mo_constants, only: twopi_dp
    1810             : 
    1811             :     implicit none
    1812             : 
    1813             :     real(dp), dimension(:),             intent(in)  :: z
    1814             :     real(dp), dimension(:),   optional, intent(in)  :: y
    1815             :     logical,  dimension(:),   optional, intent(in)  :: mask
    1816             :     logical,                  optional, intent(out) :: valid
    1817             :     real(dp)                                        :: nadaraya_watson_1d_dp
    1818             : 
    1819             :     ! local variables
    1820     2842560 :     real(dp), dimension(size(z,1)) :: w
    1821      135360 :     real(dp)                       :: sum_w
    1822      135360 :     logical,  dimension(size(z,1)) :: mask1d
    1823      135360 :     real(dp)                       :: large_z
    1824     2842560 :     real(dp), dimension(size(z,1)) :: ztmp
    1825             : 
    1826      135360 :     if (present(mask)) then
    1827       27720 :        mask1d = mask
    1828             :     else
    1829     2814840 :        mask1d = .true.
    1830             :     end if
    1831             : 
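                     :     ! large_z is the largest |z| for which the Gaussian kernel weight
                     :     ! exp(-0.5*z*z)/sqrt(2*pi) is still representable; weights for larger |z|
                     :     ! would underflow and are therefore set to zero below.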
    1832      135360 :     large_z = sqrt(-2.0_dp*log(tiny(1.0_dp)*sqrt(twopi_dp)))
    1833     2842560 :     where (mask1d) ! temporarily store z in ztmp where mask is true
    1834      135360 :        ztmp = z
    1835             :     elsewhere
    1836             :        ztmp = 0.0_dp
    1837             :     end where
    1838     2842560 :     where (mask1d .and. (abs(ztmp) .lt. large_z))
    1839             :        w = (1.0_dp/sqrt(twopi_dp)) * exp(-0.5_dp*z*z)
    1840             :     elsewhere
    1841             :        w = 0.0_dp
    1842             :     end where
    1843     2842560 :     sum_w = sum(w, mask=mask1d)
    1844             : 
    1845      135360 :     if (present(valid)) valid = .true.
    1846      135360 :     if (present(y)) then       ! kernel regression
    1847           0 :        if (sum_w .lt. tiny(1.0_dp)) then
    1848           0 :           nadaraya_watson_1d_dp = huge(1.0_dp)
    1849           0 :           if (present(valid)) valid = .false.
    1850             :        else
    1851           0 :           nadaraya_watson_1d_dp = sum(w*y,mask=mask1d) / sum_w
    1852             :        end if
    1853             :     else                        ! kernel density
    1854             :        nadaraya_watson_1d_dp = sum_w
    1855             :     end if
    1856             : 
    1857      135360 :   end function nadaraya_watson_1d_dp
    1858             : 
    1859           0 :   function nadaraya_watson_1d_sp(z, y, mask, valid)
    1860             : 
    1861      135360 :     use mo_constants, only: twopi_sp
    1862             : 
    1863             :     implicit none
    1864             : 
    1865             :     real(sp), dimension(:),             intent(in)  :: z
    1866             :     real(sp), dimension(:),   optional, intent(in)  :: y
    1867             :     logical,  dimension(:),   optional, intent(in)  :: mask
    1868             :     logical,                  optional, intent(out) :: valid
    1869             :     real(sp)                                        :: nadaraya_watson_1d_sp
    1870             : 
    1871             :     ! local variables
    1872           0 :     real(sp), dimension(size(z,1)) :: w
    1873           0 :     real(sp)                       :: sum_w
    1874           0 :     logical,  dimension(size(z,1)) :: mask1d
    1875           0 :     real(sp)                       :: large_z
    1876           0 :     real(sp), dimension(size(z,1)) :: ztmp
    1877             : 
    1878           0 :     if (present(mask)) then
    1879           0 :        mask1d = mask
    1880             :     else
    1881           0 :        mask1d = .true.
    1882             :     end if
    1883             : 
    1884           0 :     large_z = sqrt(-2.0_sp*log(tiny(1.0_sp)*sqrt(twopi_sp)))
    1885           0 :     where (mask1d) ! temporarily store z in ztmp where mask is true
    1886           0 :        ztmp = z
    1887             :     elsewhere
    1888             :        ztmp = 0.0_sp
    1889             :     end where
    1890           0 :     where (mask1d .and. (abs(ztmp) .lt. large_z))
    1891             :        w = (1.0_sp/sqrt(twopi_sp)) * exp(-0.5_sp*z*z)
    1892             :     elsewhere
    1893             :        w = 0.0_sp
    1894             :     end where
    1895           0 :     sum_w = sum(w, mask=mask1d)
    1896             : 
    1897           0 :     if (present(valid)) valid = .true.
    1898           0 :     if (present(y)) then       ! kernel regression
    1899           0 :        if (sum_w .lt. tiny(1.0_sp)) then
    1900           0 :           nadaraya_watson_1d_sp = huge(1.0_sp)
    1901           0 :           if (present(valid)) valid = .false.
    1902             :        else
    1903           0 :           nadaraya_watson_1d_sp = sum(w*y,mask=mask1d) / sum_w
    1904             :        end if
    1905             :     else                        ! kernel density
    1906             :        nadaraya_watson_1d_sp = sum_w
    1907             :     end if
    1908             : 
    1909           0 :   end function nadaraya_watson_1d_sp
    1910             : 
    1911        9860 :   function nadaraya_watson_2d_dp(z, y, mask, valid)
    1912             : 
    1913           0 :     use mo_constants, only: twopi_dp
    1914             : 
    1915             :     implicit none
    1916             : 
    1917             :     real(dp), dimension(:,:),           intent(in)  :: z
    1918             :     real(dp), dimension(:),   optional, intent(in)  :: y
    1919             :     logical,  dimension(:),   optional, intent(in)  :: mask
    1920             :     logical,                  optional, intent(out) :: valid
    1921             :     real(dp)                                        :: nadaraya_watson_2d_dp
    1922             : 
    1923             :     ! local variables
    1924      113390 :     real(dp), dimension(size(z,1), size(z,2)) :: kerf
    1925       54230 :     real(dp), dimension(size(z,1))            :: w
    1926       14790 :     real(dp)                                  :: sum_w
    1927        4930 :     logical,  dimension(size(z,1))            :: mask1d
    1928        4930 :     logical,  dimension(size(z,1), size(z,2)) :: mask2d
    1929       14790 :     real(dp)                                  :: large_z
    1930      113390 :     real(dp), dimension(size(z,1), size(z,2)) :: ztmp
    1931             : 
    1932        4930 :     if (present(mask)) then
    1933       54120 :        mask1d = mask
    1934        4920 :        mask2d = spread(mask1d, dim=2, ncopies=size(z,2))
    1935             :     else
    1936         110 :        mask1d = .true.
    1937         230 :        mask2d = .true.
    1938             :     end if
    1939             : 
    1940        4930 :     large_z = sqrt(-2.0_dp*log(tiny(1.0_dp)*sqrt(twopi_dp)))
    1941      113390 :     where (mask2d) ! temporarily store z in ztmp where mask is true
    1942        4930 :        ztmp = z
    1943             :     elsewhere
    1944             :        ztmp = 0.0_dp
    1945             :     end where
    1946      113390 :     where (mask2d .and. (abs(ztmp) .lt. large_z))
    1947             :        kerf = (1.0_dp/sqrt(twopi_dp)) * exp(-0.5_dp*z*z)
    1948             :     elsewhere
    1949             :        kerf = 0.0_dp
    1950             :     end where
    1951      152830 :     w     = product(kerf, dim=2, mask=mask2d)
    1952       54230 :     sum_w = sum(w, mask=mask1d)
    1953             : 
    1954        4930 :     if (present(valid)) valid = .true.
    1955        4930 :     if (present(y)) then       ! kernel regression
    1956        4930 :        if (sum_w .lt. tiny(1.0_dp)) then
    1957           0 :           nadaraya_watson_2d_dp = huge(1.0_dp)
    1958           0 :           if (present(valid)) valid = .false.
    1959             :        else
    1960       54230 :           nadaraya_watson_2d_dp = sum(w*y,mask=mask1d) / sum_w
    1961             :        end if
    1962             :     else                       ! kernel density
    1963             :        nadaraya_watson_2d_dp = sum_w
    1964             :     end if
    1965             : 
    1966        4930 :   end function nadaraya_watson_2d_dp
    1967             : 
    1968           0 :   function nadaraya_watson_2d_sp(z, y, mask, valid)
    1969             : 
    1970        4930 :     use mo_constants, only: twopi_sp
    1971             : 
    1972             :     implicit none
    1973             : 
    1974             :     real(sp), dimension(:,:),           intent(in)  :: z
    1975             :     real(sp), dimension(:),   optional, intent(in)  :: y
    1976             :     logical,  dimension(:),   optional, intent(in)  :: mask
    1977             :     logical,                  optional, intent(out) :: valid
    1978             :     real(sp)                                        :: nadaraya_watson_2d_sp
    1979             : 
    1980             :     ! local variables
    1981           0 :     real(sp), dimension(size(z,1), size(z,2)) :: kerf
    1982           0 :     real(sp), dimension(size(z,1))            :: w
    1983           0 :     real(sp)                                  :: sum_w
    1984           0 :     logical,  dimension(size(z,1))            :: mask1d
    1985           0 :     logical,  dimension(size(z,1), size(z,2)) :: mask2d
    1986           0 :     real(sp)                                  :: large_z
    1987           0 :     real(sp), dimension(size(z,1), size(z,2)) :: ztmp
    1988             : 
    1989           0 :     if (present(mask)) then
    1990           0 :        mask1d = mask
    1991           0 :        mask2d = spread(mask1d, dim=2, ncopies=size(z,2))
    1992             :     else
    1993           0 :        mask1d = .true.
    1994           0 :        mask2d = .true.
    1995             :     end if
    1996             : 
    1997           0 :     large_z = sqrt(-2.0_sp*log(tiny(1.0_sp)*sqrt(twopi_sp)))
    1998           0 :     where (mask2d) ! temporarily store z in ztmp where mask is true
    1999           0 :        ztmp = z
    2000             :     elsewhere
    2001             :        ztmp = 0.0_sp
    2002             :     end where
    2003           0 :     where (mask2d .and. (abs(ztmp) .lt. large_z))
    2004             :        kerf = (1.0_sp/sqrt(twopi_sp)) * exp(-0.5_sp*z*z)
    2005             :     elsewhere
    2006             :        kerf = 0.0_sp
    2007             :     end where
    2008           0 :     w     = product(kerf, dim=2, mask=mask2d)
    2009           0 :     sum_w = sum(w, mask=mask1d)
    2010             : 
    2011           0 :     if (present(valid)) valid = .true.
    2012           0 :     if (present(y)) then       ! kernel regression
    2013           0 :        if (sum_w .lt. tiny(1.0_sp)) then
    2014           0 :           nadaraya_watson_2d_sp = huge(1.0_sp)
    2015           0 :           if (present(valid)) valid = .false.
    2016             :        else
    2017           0 :           nadaraya_watson_2d_sp = sum(w*y,mask=mask1d) / sum_w
    2018             :        end if
    2019             :     else                       ! kernel density
    2020             :        nadaraya_watson_2d_sp = sum_w
    2021             :     end if
    2022             : 
    2023           0 :   end function nadaraya_watson_2d_sp
    2024             : 
    2025             : 
    2026             :   ! ------------------------------------------------------------------------------------------------
    2027             : 
    2028         492 :   function cross_valid_regression_dp(h)
    2029             : 
    2030             :     implicit none
    2031             : 
    2032             :     ! Helper function that calculates the cross-validation function for the
    2033             :     ! Nadaraya-Watson estimator, i.e. the mean square error between the
    2034             :     ! observations and the model estimates obtained without the respective
    2035             :     ! point (jackknife estimate; Haerdle et al. 2000).
    2036             : 
    2037             :     ! The function is always 2D because allocate_globals always creates 2D arrays.
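                     :     ! In formula: CV(h) = (1/n) * sum_i ( y_i - m_{h,-i}(x_i) )**2, where
                     :     ! m_{h,-i} is the Nadaraya-Watson estimate at x_i with point i excluded.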
    2038             : 
    2039             :     real(dp), dimension(:), intent(in) :: h
    2040             :     real(dp)                           :: cross_valid_regression_dp
    2041             : 
    2042             :     ! local variables
    2043             :     integer(i4)                                                    :: ii, kk, nn
    2044         492 :     logical,  dimension(size(global_x_dp,1))                       :: mask
    2045        5904 :     real(dp), dimension(size(global_x_dp,1))                       :: out
    2046       11316 :     real(dp), dimension(size(global_x_dp,1),size(global_x_dp,2))   :: zz
    2047             :     logical                                                        :: valid, valid_tmp
    2048             : 
    2049         492 :     nn = size(global_x_dp,1)
    2050             :     ! Loop over the regression points and calculate the kernel estimate without the respective point (jackknife)
    2051         492 :     valid = .true.
    2052        5412 :     mask  = .true.
    2053        5412 :     do ii=1, nn
    2054        4920 :        mask(ii) = .false.
    2055      113160 :        zz(:,:) = 0.0_dp
    2056      201720 :        forall(kk=1:nn, mask(kk)) zz(kk,:) = (global_x_dp(kk,:) - global_x_dp(ii,:)) / h(:)
    2057        4920 :        out(ii) = nadaraya_watson(zz, y=global_y_dp, mask=mask, valid=valid_tmp)
    2058        4920 :        valid = valid .and. valid_tmp
    2059        5412 :        mask(ii) = .true.
    2060             :     end do
    2061             : 
    2062             :     ! Mean square deviation
    2063         492 :     if (valid) then
    2064        5412 :        cross_valid_regression_dp = sum((global_y_dp-out)**2) / real(nn,dp)
    2065             :     else
    2066             :        cross_valid_regression_dp = huge(1.0_dp)
    2067             :     end if
    2068             : 
    2069             :     ! write(*,*) 'cross_valid_regression_dp = ', cross_valid_regression_dp, '  ( h = ', h, ' )'
    2070             : 
    2071           0 :   end function cross_valid_regression_dp
    2072             : 
    2073           0 :   function cross_valid_regression_sp(h)
    2074             : 
    2075             :     implicit none
    2076             : 
    2077             :     ! Helper function that calculates the cross-validation function for the
    2078             :     ! Nadaraya-Watson estimator, i.e. the mean square error between the
    2079             :     ! observations and the model estimates obtained without the respective
    2080             :     ! point (jackknife estimate; Haerdle et al. 2000).
    2081             : 
    2082             :     ! The function is always 2D because allocate_globals always creates 2D arrays.
    2083             : 
    2084             :     real(sp), dimension(:), intent(in) :: h
    2085             :     real(sp)                           :: cross_valid_regression_sp
    2086             : 
    2087             :     ! local variables
    2088             :     integer(i4)                                                    :: ii, kk, nn
    2089           0 :     logical,  dimension(size(global_x_sp,1))                       :: mask
    2090           0 :     real(sp), dimension(size(global_x_sp,1))                       :: out
    2091           0 :     real(sp), dimension(size(global_x_sp,1),size(global_x_sp,2))   :: zz
    2092             :     logical                                                        :: valid, valid_tmp
    2093             : 
    2094           0 :     nn = size(global_x_sp,1)
    2095             :     ! Loop over the regression points and calculate the kernel estimate without the respective point (jackknife)
    2096           0 :     valid = .true.
    2097           0 :     mask  = .true.
    2098           0 :     do ii=1, nn
    2099           0 :        mask(ii) = .false.
    2100           0 :        forall(kk=1:nn, mask(kk)) zz(kk,:) = (global_x_sp(kk,:) - global_x_sp(ii,:)) / h(:)
    2101           0 :        out(ii) = nadaraya_watson(zz, y=global_y_sp, mask=mask, valid=valid_tmp)
    2102           0 :        valid = valid .and. valid_tmp
    2103           0 :        mask(ii) = .true.
    2104             :     end do
    2105             : 
    2106             :     ! Mean square deviation
    2107           0 :     if (valid) then
    2108           0 :        cross_valid_regression_sp = sum((global_y_sp-out)**2) / real(nn,sp)
    2109             :     else
    2110             :        cross_valid_regression_sp = huge(1.0_sp)
    2111             :     end if
    2112             : 
    2113         492 :   end function cross_valid_regression_sp
    2114             : 
    2115             : 
    2116             :   ! ------------------------------------------------------------------------------------------------
    2117             : 
    2118          66 :   function cross_valid_density_1d_dp(h)
    2119             : 
    2120           0 :     use mo_integrate, only: int_regular
    2121             : 
    2122             :     implicit none
    2123             : 
    2124             :     ! Helper function that calculates the least-squares cross-validation function for the
    2125             :     ! kernel density estimator: the integral of the squared density estimate minus twice
    2126             :     ! the mean of the leave-one-out (jackknife) density estimates (Haerdle et al. 2000).
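                     :     ! In formula: CV(h) = integral( f_h(x)**2 dx ) - (2/n) * sum_i f_{h,-i}(x_i),
                     :     ! where f_h is the kernel density estimate (kernel sum scaled by 1/(n*h))
                     :     ! and f_{h,-i} is the estimate at x_i with point i left out.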
    2127             : 
    2128             :     real(dp), intent(in)               :: h
    2129             :     real(dp)                           :: cross_valid_density_1d_dp
    2130             : 
    2131             :     ! local variables
    2132             :     integer(i4)                              :: ii, nn
    2133          66 :     logical,  dimension(size(global_x_dp,1)) :: mask
    2134        1386 :     real(dp), dimension(size(global_x_dp,1)) :: out
    2135        1386 :     real(dp), dimension(size(global_x_dp,1)) :: zz
    2136          66 :     real(dp)                                 :: delta
    2137             :     integer(i4)                              :: mesh_n
    2138          66 :     real(dp), dimension(:), allocatable      :: xMeshed
    2139          66 :     real(dp), dimension(:), allocatable      :: outIntegral
    2140        1452 :     real(dp), dimension(size(global_x_dp,1)) :: zzIntegral
    2141        1386 :     real(dp)                                 :: stddev_x
    2142        1386 :     real(dp)                                 :: summ, multiplier, thresh
    2143             : 
    2144          66 :     nn   = size(global_x_dp,1)
    2145          66 :     if (nn .le. 100_i4) then       ! if only few data points are given, the mesh has 100*n points
    2146          66 :        mesh_n = 100_i4*nn
    2147             :     else                           ! otherwise mesh_n is chosen so that the mesh has at most about 10000 points (but at least 2*n)
    2148           0 :        mesh_n = Max(2_i4, 10000_i4/nn) * nn
    2149             :     end if
    2150         198 :     allocate(xMeshed(mesh_n))
    2151         132 :     allocate(outIntegral(mesh_n))
    2152             : 
    2153             :     ! integral of the squared density estimate, i.e. the squared L2-norm of the estimate
    2154          66 :     stddev_x = stddev(global_x_dp(:,1))
    2155        2838 :     xMeshed  = mesh(minval(global_x_dp) - 3.0_dp*stddev_x, maxval(global_x_dp) + 3.0_dp*stddev_x, mesh_n, delta)
    2156             : 
    2157          66 :     multiplier = 1.0_dp/(real(nn,dp)*h)
    2158          66 :     if (multiplier <= 1.0_dp) then
    2159          66 :        thresh = tiny(1.0_dp)/multiplier
    2160             :     else
    2161             :        thresh = 0.0_dp
    2162             :     endif
    2163      132066 :     do ii=1, mesh_n
    2164     2772000 :        zzIntegral = (global_x_dp(:,1) - xMeshed(ii)) / h
    2165      132000 :        outIntegral(ii) = nadaraya_watson(zzIntegral)
    2166      132066 :        if (outIntegral(ii) .gt. thresh) outIntegral(ii) = multiplier * outIntegral(ii)
    2167             :     end do
    2168      132066 :     where (outIntegral .lt. sqrt(tiny(1.0_dp)) )
    2169             :        outIntegral = 0.0_dp
    2170             :     end where
    2171      132066 :     summ = int_regular(outIntegral*outIntegral, delta)
    2172             : 
    2173             :     ! leave-one-out estimate
    2174        1386 :     mask = .true.
    2175        1386 :     do ii=1, nn
    2176        1320 :        mask(ii) = .false.
    2177       27720 :        zz       = (global_x_dp(:,1) - global_x_dp(ii,1)) / h
    2178        1320 :        out(ii)  = nadaraya_watson(zz, mask=mask)
    2179        1320 :        if (out(ii) .gt. thresh) out(ii) = multiplier * out(ii)
    2180        1386 :        mask(ii) = .true.
    2181             :     end do
    2182             : 
    2183        1386 :     cross_valid_density_1d_dp = summ - 2.0_dp / (real(nn,dp)) * sum(out)
    2184             :     ! write(*,*) 'h = ',h, '   cross_valid = ',cross_valid_density_1d_dp
    2185             : 
    2186             :     ! clean up
    2187          66 :     deallocate(xMeshed)
    2188          66 :     deallocate(outIntegral)
    2189             : 
    2190          66 :   end function cross_valid_density_1d_dp
    2191             : 
    2192           0 :   function cross_valid_density_1d_sp(h)
    2193             : 
    2194          66 :     use mo_integrate, only: int_regular
    2195             : 
    2196             :     implicit none
    2197             : 
    2198             :     ! Helper function that calculates the least-squares cross-validation function for the
    2199             :     ! kernel density estimator: the integral of the squared density estimate minus twice
    2200             :     ! the mean of the leave-one-out (jackknife) density estimates (Haerdle et al. 2000).
    2201             : 
    2202             :     real(sp),               intent(in) :: h
    2203             :     real(sp)                           :: cross_valid_density_1d_sp
    2204             : 
    2205             :     ! local variables
    2206             :     integer(i4)                              :: ii, nn
    2207           0 :     logical,  dimension(size(global_x_sp,1)) :: mask
    2208           0 :     real(sp), dimension(size(global_x_sp,1)) :: out
    2209           0 :     real(sp), dimension(size(global_x_sp,1)) :: zz
    2210           0 :     real(sp)                                 :: delta
    2211             :     integer(i4)                              :: mesh_n
    2212           0 :     real(sp), dimension(:), allocatable      :: xMeshed
    2213           0 :     real(sp), dimension(:), allocatable      :: outIntegral
    2214           0 :     real(sp), dimension(size(global_x_sp,1)) :: zzIntegral
    2215           0 :     real(sp)                                 :: stddev_x
    2216           0 :     real(sp)                                 :: summ, multiplier, thresh
    2217             : 
    2218           0 :     nn   = size(global_x_sp,1)
    2219           0 :     if (nn .le. 100_i4) then       ! if only few data points are given, the mesh has 100*n points
    2220           0 :        mesh_n = 100_i4*nn
    2221             :     else                           ! otherwise mesh_n is chosen so that the mesh has at most about 10000 points (but at least 2*n)
    2222           0 :        mesh_n = Max(2_i4, 10000_i4/nn) * nn
    2223             :     end if
    2224           0 :     allocate(xMeshed(mesh_n))
    2225           0 :     allocate(outIntegral(mesh_n))
    2226             : 
    2227             :     ! integral of the squared density estimate, i.e. the squared L2-norm of the estimate
    2228           0 :     stddev_x = stddev(global_x_sp(:,1))
    2229           0 :     xMeshed  = mesh(minval(global_x_sp) - 3.0_sp*stddev_x, maxval(global_x_sp) + 3.0_sp*stddev_x, mesh_n, delta)
    2230             : 
    2231           0 :     multiplier = 1.0_sp/(real(nn,sp)*h)
    2232           0 :     if (multiplier <= 1.0_sp) then
    2233           0 :        thresh = tiny(1.0_sp)/multiplier
    2234             :     else
    2235             :        thresh = 0.0_sp
    2236             :     endif
    2237           0 :     do ii=1, mesh_n
    2238           0 :        zzIntegral = (global_x_sp(:,1) - xMeshed(ii)) / h
    2239           0 :        outIntegral(ii) = nadaraya_watson(zzIntegral)
    2240           0 :        if (outIntegral(ii) .gt. thresh) outIntegral(ii) = multiplier * outIntegral(ii)
    2241             :     end do
    2242           0 :     where (outIntegral .lt. sqrt(tiny(1.0_sp)) )
    2243             :        outIntegral = 0.0_sp
    2244             :     end where
    2245           0 :     summ = int_regular(outIntegral*outIntegral, delta)
    2246             : 
    2247             :     ! leave-one-out estimate
    2248           0 :     mask = .true.
    2249           0 :     do ii=1, nn
    2250           0 :        mask(ii) = .false.
    2251           0 :        zz       = (global_x_sp(:,1) - global_x_sp(ii,1)) / h
    2252           0 :        out(ii)  = nadaraya_watson(zz, mask=mask)
    2253           0 :        if (out(ii) .gt. thresh) out(ii) = multiplier * out(ii)
    2254           0 :        mask(ii) = .true.
    2255             :     end do
    2256             : 
    2257           0 :     cross_valid_density_1d_sp = summ - 2.0_sp / (real(nn,sp)) * sum(out)
    2258             : 
    2259             :     ! clean up
    2260           0 :     deallocate(xMeshed)
    2261           0 :     deallocate(outIntegral)
    2262             : 
    2263           0 :   end function cross_valid_density_1d_sp
    2264             : 
    2265             : 
    2266             :   ! ------------------------------------------------------------------------------------------------
    2267             : 
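                     :   ! The allocate_globals_* routines copy the data into the module variables
                     :   ! global_x_dp/sp, global_y_dp/sp and global_xout_dp/sp so that the
                     :   ! cross-validation objective functions above, which receive only the
                     :   ! bandwidth h, can access the data during the optimization; 1D input is
                     :   ! stored as an (n,1) array. deallocate_globals releases the copies again.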
    2268           6 :   subroutine allocate_globals_1d_dp(x,y,xout)
    2269             : 
    2270             :     implicit none
    2271             : 
    2272             :     real(dp), dimension(:),           intent(in) :: x
    2273             :     real(dp), dimension(:), optional, intent(in) :: y
    2274             :     real(dp), dimension(:), optional, intent(in) :: xout
    2275             : 
    2276           9 :     allocate( global_x_dp(size(x,1),1) )
    2277          66 :     global_x_dp(:,1) = x
    2278             : 
    2279           3 :     if (present(y)) then
    2280           0 :        allocate( global_y_dp(size(y,1)) )
    2281           0 :        global_y_dp = y
    2282             :     end if
    2283             : 
    2284           3 :     if (present(xout)) then
    2285           0 :        allocate( global_xout_dp(size(xout,1),1) )
    2286           0 :        global_xout_dp(:,1) = xout
    2287             :     end if
    2288             : 
    2289           0 :   end subroutine allocate_globals_1d_dp
    2290             : 
    2291           0 :   subroutine allocate_globals_1d_sp(x,y,xout)
    2292             : 
    2293             :     implicit none
    2294             : 
    2295             :     real(sp), dimension(:),           intent(in) :: x
    2296             :     real(sp), dimension(:), optional, intent(in) :: y
    2297             :     real(sp), dimension(:), optional, intent(in) :: xout
    2298             : 
    2299           0 :     allocate( global_x_sp(size(x,1),1) )
    2300           0 :     global_x_sp(:,1) = x
    2301             : 
    2302           0 :     if (present(y)) then
    2303           0 :        allocate( global_y_sp(size(y,1)) )
    2304           0 :        global_y_sp = y
    2305             :     end if
    2306             : 
    2307           0 :     if (present(xout)) then
    2308           0 :        allocate( global_xout_sp(size(xout,1),1) )
    2309           0 :        global_xout_sp(:,1) = xout
    2310             :     end if
    2311             : 
    2312           3 :   end subroutine allocate_globals_1d_sp
    2313             : 
    2314           4 :   subroutine allocate_globals_2d_dp(x,y,xout)
    2315             : 
    2316             :     implicit none
    2317             : 
    2318             :     real(dp), dimension(:,:),           intent(in) :: x
    2319             :     real(dp), dimension(:),   optional, intent(in) :: y
    2320             :     real(dp), dimension(:,:), optional, intent(in) :: xout
    2321             : 
    2322           8 :     allocate( global_x_dp(size(x,1),size(x,2)) )
    2323          48 :     global_x_dp = x
    2324             : 
    2325           2 :     if (present(y)) then
    2326           6 :        allocate( global_y_dp(size(y,1)) )
    2327          24 :        global_y_dp = y
    2328             :     end if
    2329             : 
    2330           2 :     if (present(xout)) then
    2331           0 :        allocate( global_xout_dp(size(xout,1),size(xout,2)) )
    2332           0 :        global_xout_dp = xout
    2333             :     end if
    2334             : 
    2335           0 :   end subroutine allocate_globals_2d_dp
    2336             : 
    2337           0 :   subroutine allocate_globals_2d_sp(x,y,xout)
    2338             : 
    2339             :     implicit none
    2340             : 
    2341             :     real(sp), dimension(:,:),           intent(in) :: x
    2342             :     real(sp), dimension(:),   optional, intent(in) :: y
    2343             :     real(sp), dimension(:,:), optional, intent(in) :: xout
    2344             : 
    2345           0 :     allocate( global_x_sp(size(x,1),size(x,2)) )
    2346           0 :     global_x_sp = x
    2347             : 
    2348           0 :     if (present(y)) then
    2349           0 :        allocate( global_y_sp(size(y,1)) )
    2350           0 :        global_y_sp = y
    2351             :     end if
    2352             : 
    2353           0 :     if (present(xout)) then
    2354           0 :        allocate( global_xout_sp(size(xout,1),size(xout,2)) )
    2355           0 :        global_xout_sp = xout
    2356             :     end if
    2357             : 
    2358           2 :   end subroutine allocate_globals_2d_sp
    2359             : 
    2360             : 
    2361             :   ! ------------------------------------------------------------------------------------------------
    2362             : 
    2363           5 :   subroutine deallocate_globals()
    2364             : 
    2365             :     implicit none
    2366             : 
    2367           5 :     if (allocated(global_x_dp))    deallocate(global_x_dp)
    2368           5 :     if (allocated(global_y_dp))    deallocate(global_y_dp)
    2369           5 :     if (allocated(global_xout_dp)) deallocate(global_xout_dp)
    2370           5 :     if (allocated(global_x_sp))    deallocate(global_x_sp)
    2371           5 :     if (allocated(global_y_sp))    deallocate(global_y_sp)
    2372           5 :     if (allocated(global_xout_sp)) deallocate(global_xout_sp)
    2373             : 
    2374           0 :   end subroutine deallocate_globals
    2375             : 
    2376             : 
    2377             :   ! ------------------------------------------------------------------------------------------------
    2378             : 
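                     :   ! golden_sp/golden_dp perform a golden section search: starting from a
                     :   ! bracketing triplet (ax, bx, cx) of the minimum, the bracket is shrunk by
                     :   ! the golden ratio R = 0.61803399... = (sqrt(5)-1)/2 in each iteration until
                     :   ! its width falls below the fractional tolerance tol. The minimum function
                     :   ! value is returned and its location is stored in xmin.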
    2379           0 :   FUNCTION golden_sp(ax,bx,cx,func,tol,xmin)
    2380             : 
    2381             :     IMPLICIT NONE
    2382             : 
    2383             :     REAL(SP), INTENT(IN)      :: ax,bx,cx,tol
    2384             :     REAL(SP), INTENT(OUT)     :: xmin
    2385             :     REAL(SP)                  :: golden_sp
    2386             : 
    2387             :     INTERFACE
    2388             :        FUNCTION func(x)
    2389             :          use mo_kind
    2390             :          IMPLICIT NONE
    2391             :          REAL(SP), INTENT(IN) :: x
    2392             :          REAL(SP)             :: func
    2393             :        END FUNCTION func
    2394             :     END INTERFACE
    2395             : 
    2396             :     REAL(SP), PARAMETER       :: R=0.61803399_sp,C=1.0_sp-R
    2397           0 :     REAL(SP)                  :: f1,f2,x0,x1,x2,x3
    2398             : 
    2399           0 :     x0=ax
    2400           0 :     x3=cx
    2401           0 :     if (abs(cx-bx) > abs(bx-ax)) then
    2402           0 :        x1=bx
    2403           0 :        x2=bx+C*(cx-bx)
    2404             :     else
    2405           0 :        x2=bx
    2406           0 :        x1=bx-C*(bx-ax)
    2407             :     end if
    2408           0 :     f1=func(x1)
    2409           0 :     f2=func(x2)
    2410             :     do
    2411           0 :        if (abs(x3-x0) <= tol*(abs(x1)+abs(x2))) exit
    2412           0 :        if (f2 < f1) then
    2413           0 :           call shft3(x0,x1,x2,R*x2+C*x3)
    2414           0 :           call shft2(f1,f2,func(x2))
    2415             :        else
    2416           0 :           call shft3(x3,x2,x1,R*x1+C*x0)
    2417           0 :           call shft2(f2,f1,func(x1))
    2418             :        end if
    2419             :     end do
    2420           5 :     if (f1 < f2) then
    2421           0 :        golden_sp=f1
    2422           0 :        xmin=x1
    2423             :     else
    2424           0 :        golden_sp=f2
    2425           0 :        xmin=x2
    2426             :     end if
    2427             : 
    2428             :   CONTAINS
    2429             : 
    2430           0 :     SUBROUTINE shft2(a,b,c)
    2431             :       REAL(SP), INTENT(OUT)   :: a
    2432             :       REAL(SP), INTENT(INOUT) :: b
    2433             :       REAL(SP), INTENT(IN)    :: c
    2434           0 :       a=b
    2435           0 :       b=c
    2436           0 :     END SUBROUTINE shft2
    2437             : 
    2438           0 :     SUBROUTINE shft3(a,b,c,d)
    2439             :       REAL(SP), INTENT(OUT)   :: a
    2440             :       REAL(SP), INTENT(INOUT) :: b,c
    2441             :       REAL(SP), INTENT(IN)    :: d
    2442           0 :       a=b
    2443           0 :       b=c
    2444           0 :       c=d
    2445           0 :     END SUBROUTINE shft3
    2446             : 
    2447             :   END FUNCTION golden_sp
    2448             : 
    2449           3 :   FUNCTION golden_dp(ax,bx,cx,func,tol,xmin)
    2450             : 
    2451             :     IMPLICIT NONE
    2452             : 
    2453             :     REAL(DP), INTENT(IN)      :: ax,bx,cx,tol
    2454             :     REAL(DP), INTENT(OUT)     :: xmin
    2455             :     REAL(DP)                  :: golden_dp
    2456             : 
    2457             :     INTERFACE
    2458             :        FUNCTION func(x)
    2459             :          use mo_kind
    2460             :          IMPLICIT NONE
    2461             :          REAL(DP), INTENT(IN) :: x
    2462             :          REAL(DP)             :: func
    2463             :        END FUNCTION func
    2464             :     END INTERFACE
    2465             : 
    2466             :     REAL(DP), PARAMETER       :: R=0.61803399_dp,C=1.0_dp-R
    2467           3 :     REAL(DP)                  :: f1,f2,x0,x1,x2,x3
    2468             : 
    2469           3 :     x0=ax
    2470           3 :     x3=cx
    2471           3 :     if (abs(cx-bx) > abs(bx-ax)) then
    2472           3 :        x1=bx
    2473           3 :        x2=bx+C*(cx-bx)
    2474             :     else
    2475           0 :        x2=bx
    2476           0 :        x1=bx-C*(bx-ax)
    2477             :     end if
    2478           3 :     f1=func(x1)
    2479           3 :     f2=func(x2)
    2480             :     do
    2481          63 :        if (abs(x3-x0) <= tol*(abs(x1)+abs(x2))) exit
    2482          63 :        if (f2 < f1) then
    2483          30 :           call shft3(x0,x1,x2,R*x2+C*x3)
    2484          30 :           call shft2(f1,f2,func(x2))
    2485             :        else
    2486          30 :           call shft3(x3,x2,x1,R*x1+C*x0)
    2487          30 :           call shft2(f2,f1,func(x1))
    2488             :        end if
    2489             :     end do
    2490           3 :     if (f1 < f2) then
    2491           3 :        golden_dp=f1
    2492           3 :        xmin=x1
    2493             :     else
    2494           0 :        golden_dp=f2
    2495           0 :        xmin=x2
    2496             :     end if
    2497             : 
    2498             :   CONTAINS
    2499             : 
    2500          60 :     SUBROUTINE shft2(a,b,c)
    2501             :       REAL(DP), INTENT(OUT)   :: a
    2502             :       REAL(DP), INTENT(INOUT) :: b
    2503             :       REAL(DP), INTENT(IN)    :: c
    2504          60 :       a=b
    2505          60 :       b=c
    2506           3 :     END SUBROUTINE shft2
    2507             : 
    2508          60 :     SUBROUTINE shft3(a,b,c,d)
    2509             :       REAL(DP), INTENT(OUT)   :: a
    2510             :       REAL(DP), INTENT(INOUT) :: b,c
    2511             :       REAL(DP), INTENT(IN)    :: d
    2512          60 :       a=b
    2513          60 :       b=c
    2514          60 :       c=d
    2515         120 :     END SUBROUTINE shft3
    2516             : 
    2517             :   END FUNCTION golden_dp
    2518             : 
    2519             : 
    2520             :   ! ------------------------------------------------------------------------------------------------
    2521             : 
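                     :   ! mesh_dp/mesh_sp return n equidistant points from start to end (inclusive)
                     :   ! together with the spacing delta = (end-start)/(n-1). For example,
                     :   ! mesh(0.0_dp, 1.0_dp, 5_i4, delta) yields (0.00, 0.25, 0.50, 0.75, 1.00)
                     :   ! with delta = 0.25.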
    2522         132 :   function mesh_dp(start, end, n, delta)
    2523             : 
    2524             :     implicit none
    2525             : 
    2526             :     real(dp),    intent(in)  :: start
    2527             :     real(dp),    intent(in)  :: end
    2528             :     integer(i4), intent(in)  :: n
    2529             :     real(dp),    intent(out) :: delta
    2530             :     real(dp), dimension(n)   :: mesh_dp
    2531             : 
    2532             :     ! local variables
    2533             :     integer(i4) :: ii
    2534             : 
    2535          66 :     delta = (end-start) / real(n-1,dp)
    2536      132066 :     forall(ii=1:n) mesh_dp(ii) = start + (ii-1) * delta
    2537             : 
    2538          66 :   end function mesh_dp
    2539             : 
    2540          66 :   function mesh_sp(start, end, n, delta)
    2541             : 
    2542             :     implicit none
    2543             : 
    2544             :     real(sp),    intent(in)  :: start
    2545             :     real(sp),    intent(in)  :: end
    2546             :     integer(i4), intent(in)  :: n
    2547             :     real(sp),    intent(out) :: delta
    2548             :     real(sp), dimension(n)   :: mesh_sp
    2549             : 
    2550             :     ! local variables
    2551             :     integer(i4) :: ii
    2552             : 
    2553           0 :     delta = (end-start) / real(n-1,sp)
    2554           0 :     forall(ii=1:n) mesh_sp(ii) = start + (ii-1) * delta
    2555             : 
    2556           0 :   end function mesh_sp
    2557             : 
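                     :   ! trapzd_dp/trapzd_sp compute the n-th stage of refinement of the extended
                     :   ! trapezoidal rule for the integral of the kernel density between a and b:
                     :   ! n=1 uses only the two endpoints, every further call adds 2**(n-2) interior
                     :   ! points and updates res. Together with polint (polynomial extrapolation
                     :   ! towards zero step size) this provides Romberg-type integration.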
    2558           0 :   subroutine trapzd_dp(kernel,x,h,a,b,res,n)
    2559             : 
    2560           0 :     use mo_utils, only: linspace
    2561             : 
    2562             :     implicit none
    2563             : 
    2564             :     ! kernel density function
    2565             :     interface
    2566             :        function kernel(ix, hh, silverman, xout, mask, nodata)
    2567             :          use mo_kind
    2568             :          implicit none
    2569             :          real(dp), dimension(:),                       intent(in) :: ix
    2570             :          real(dp),                           optional, intent(in) :: hh
    2571             :          logical,                            optional, intent(in) :: silverman
    2572             :          real(dp), dimension(:),             optional, intent(in) :: xout
    2573             :          logical,  dimension(:),             optional, intent(in) :: mask
    2574             :          real(dp),                           optional, intent(in) :: nodata
    2575             :          real(dp), dimension(:), allocatable                      :: kernel
    2576             :        end function kernel
    2577             :     end interface
    2578             : 
    2579             :     real(dp), dimension(:), intent(in)    :: x    ! datapoints
    2580             :     real(dp),               intent(in)    :: h    ! bandwidth
    2581             :     real(dp),               intent(in)    :: a,b  ! integration bounds
    2582             :     real(dp),               intent(inout) :: res  ! integral
    2583             :     integer(i4),            intent(in)    :: n    ! 2^(n-2) extra points between a and b
    2584             : 
    2585           0 :     real(dp)    :: del, fsum
    2586             :     integer(i4) :: it
    2587             : 
    2588           0 :     if (n == 1) then
    2589           0 :        res = 0.5_dp * (b-a) * sum(kernel(x, h, xout=(/a,b/)))
    2590             :     else
    2591           0 :        it   = 2**(n-2)
    2592           0 :        del  = (b-a)/real(it,dp)
    2593           0 :        fsum = sum(kernel(x, h, xout=linspace(a+0.5_dp*del,b-0.5_dp*del,it)))
    2594           0 :        res  = 0.5_dp * (res + del*fsum)
    2595             :     end if
    2596             : 
    2597           0 :   end subroutine trapzd_dp
    2598             : 
    2599           0 :   subroutine trapzd_sp(kernel,x,h,a,b,res,n)
    2600             : 
    2601           0 :     use mo_utils, only: linspace
    2602             : 
    2603             :     implicit none
    2604             : 
    2605             :     ! kernel density function
    2606             :     interface
    2607             :        function kernel(ix, hh, silverman, xout, mask, nodata)
    2608             :          use mo_kind
    2609             :          implicit none
    2610             :          real(sp), dimension(:),                       intent(in) :: ix
    2611             :          real(sp),                           optional, intent(in) :: hh
    2612             :          logical,                            optional, intent(in) :: silverman
    2613             :          real(sp), dimension(:),             optional, intent(in) :: xout
    2614             :          logical,  dimension(:),             optional, intent(in) :: mask
    2615             :          real(sp),                           optional, intent(in) :: nodata
    2616             :          real(sp), dimension(:), allocatable                      :: kernel
    2617             :        end function kernel
    2618             :     end interface
    2619             : 
    2620             :     real(sp), dimension(:), intent(in)    :: x    ! datapoints
    2621             :     real(sp),               intent(in)    :: h    ! bandwidth
    2622             :     real(sp),               intent(in)    :: a,b  ! integration bounds
    2623             :     real(sp),               intent(inout) :: res  ! integral
    2624             :     integer(i4),            intent(in)    :: n    ! 2^(n-2) extra points between a and b
    2625             : 
    2626           0 :     real(sp)    :: del, fsum
    2627             :     integer(i4) :: it
    2628             : 
    2629           0 :     if (n == 1) then
    2630           0 :        res = 0.5_sp * (b-a) * sum(kernel(x, h, xout=(/a,b/)))
    2631             :     else
    2632           0 :        it   = 2**(n-2)
    2633           0 :        del  = (b-a)/real(it,sp)
    2634           0 :        fsum = sum(kernel(x, h, xout=linspace(a+0.5_sp*del,b-0.5_sp*del,it)))
    2635           0 :        res  = 0.5_sp * (res + del*fsum)
    2636             :     end if
    2637             : 
    2638           0 :   end subroutine trapzd_sp
    2639             : 
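                     :   ! polint_dp/polint_sp implement Neville's algorithm: they evaluate the
                     :   ! interpolating polynomial through the points (xa, ya) at x and return its
                     :   ! value y together with an error estimate dy. In Romberg-type integration
                     :   ! this extrapolates successive trapezoidal estimates towards zero step size.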
    2640           0 :   subroutine polint_dp(xa, ya, x, y, dy)
    2641             : 
    2642           0 :     use mo_utils, only: eq, iminloc
    2643             : 
    2644             :     implicit none
    2645             : 
    2646             :     real(dp), dimension(:), intent(in)  :: xa, ya
    2647             :     real(dp),               intent(in)  :: x
    2648             :     real(dp),               intent(out) :: y, dy
    2649             : 
    2650             :     integer(i4) :: m, n, ns
    2651           0 :     real(dp), dimension(size(xa)) :: c, d, den, ho
    2652             : 
    2653           0 :     n  = size(xa)
    2654           0 :     c  = ya
    2655           0 :     d  = ya
    2656           0 :     ho = xa-x
    2657           0 :     ns = iminloc(abs(x-xa))
    2658           0 :     y  = ya(ns)
    2659           0 :     ns = ns-1
    2660           0 :     do m=1, n-1
    2661           0 :        den(1:n-m) = ho(1:n-m)-ho(1+m:n)
    2662           0 :        if (any(eq(den(1:n-m),0.0_dp))) then
    2663           0 :           stop 'polint: calculation failure'
    2664             :        endif
    2665           0 :        den(1:n-m) = (c(2:n-m+1)-d(1:n-m))/den(1:n-m)
    2666           0 :        d(1:n-m)   = ho(1+m:n)*den(1:n-m)
    2667           0 :        c(1:n-m)   = ho(1:n-m)*den(1:n-m)
    2668           0 :        if (2*ns < n-m) then
    2669           0 :           dy = c(ns+1)
    2670             :        else
    2671           0 :           dy = d(ns)
    2672           0 :           ns = ns-1
    2673             :        end if
    2674           0 :        y = y+dy
    2675             :     end do
    2676             : 
    2677           0 :   end subroutine polint_dp
    2678             : 
    2679           0 :   subroutine polint_sp(xa, ya, x, y, dy)
    2680             : 
    2681           0 :     use mo_utils, only: eq, iminloc
    2682             : 
    2683             :     implicit none
    2684             : 
    2685             :     real(sp), dimension(:), intent(in)  :: xa, ya
    2686             :     real(sp),               intent(in)  :: x
    2687             :     real(sp),               intent(out) :: y, dy
    2688             : 
    2689             :     integer(i4) :: m, n, ns
    2690           0 :     real(sp), dimension(size(xa)) :: c, d, den, ho
    2691             : 
    2692           0 :     n  = size(xa)
    2693           0 :     c  = ya
    2694           0 :     d  = ya
    2695           0 :     ho = xa-x
    2696           0 :     ns = iminloc(abs(x-xa))
    2697           0 :     y  = ya(ns)
    2698           0 :     ns = ns-1
    2699           0 :     do m=1, n-1
    2700           0 :        den(1:n-m) = ho(1:n-m)-ho(1+m:n)
    2701           0 :        if (any(eq(den(1:n-m),0.0_sp))) then
    2702           0 :           stop 'polint: calculation failure'
    2703             :        endif
    2704           0 :        den(1:n-m) = (c(2:n-m+1)-d(1:n-m))/den(1:n-m)
    2705           0 :        d(1:n-m)   = ho(1+m:n)*den(1:n-m)
    2706           0 :        c(1:n-m)   = ho(1:n-m)*den(1:n-m)
    2707           0 :        if (2*ns < n-m) then
    2708           0 :           dy = c(ns+1)
    2709             :        else
    2710           0 :           dy = d(ns)
    2711           0 :           ns = ns-1
    2712             :        end if
    2713           0 :        y = y+dy
    2714             :     end do
    2715             : 
    2716           0 :   end subroutine polint_sp
    2717             : 
    2718             : END MODULE mo_kernel

Generated by: LCOV version 1.16