From 3a3119a17568f7c5a02ca10ea20c3256acb74f46 Mon Sep 17 00:00:00 2001 From: Khachik Sargsyan Date: Wed, 10 Dec 2025 09:50:41 -0500 Subject: [PATCH] cleaned up many benchmark functions. updated example documentations. --- README.md | 8 + apps/create_data.py | 59 +- apps/uqpc/uq_pc.py | 2 +- docs/auto_examples/auto_examples_jupyter.zip | Bin 39996 -> 39996 bytes docs/auto_examples/ex_nn.zip | Bin 9478 -> 9478 bytes docs/auto_examples/ex_pce.zip | Bin 8972 -> 8972 bytes docs/misc/about.rst | 17 +- examples/ex_bcs.py | 6 +- examples/ex_bcs_mindex_growth.py | 9 +- examples/ex_colors.py | 6 +- examples/ex_evidence.py | 6 +- examples/ex_func.py | 30 +- examples/ex_funcall.py | 44 + examples/ex_funcgrad.py | 10 +- examples/ex_genz1d.py | 6 +- examples/ex_gmm.py | 7 +- examples/ex_gp.py | 7 +- examples/ex_gsa.py | 7 +- examples/ex_gsa_multi.py | 14 +- examples/ex_gso.py | 5 +- examples/ex_integrate.py | 5 + examples/ex_kl.py | 7 +- examples/ex_klpc.py | 5 +- examples/ex_lreg.py | 9 +- examples/ex_lreg_basiseval.py | 9 +- examples/ex_lreg_merr.py | 8 +- examples/ex_mcmc_banana.py | 7 +- examples/ex_mcmc_fitline.py | 6 +- examples/ex_mcmc_fitmodel.py | 6 +- examples/ex_mfvi.py | 9 +- examples/ex_mindex.py | 6 +- examples/ex_minf.py | 14 +- examples/ex_minf_sketch.py | 1 - examples/ex_mixture.py | 6 +- examples/ex_mrv.py | 5 +- examples/ex_optim.py | 5 + examples/ex_pcbasis1d.py | 6 +- examples/ex_pcgsa.py | 12 +- examples/ex_pcrv.py | 6 +- examples/ex_pcrv1.py | 6 +- examples/ex_pcrv2.py | 6 +- examples/ex_pcrv_mvn.py | 7 +- examples/ex_plot_fcn.py | 4 + examples/ex_quad.py | 6 +- examples/ex_sampling.py | 6 +- examples/ex_slice.py | 6 +- examples/ex_uprop.py | 6 +- examples/ex_uprop2.py | 6 +- examples/ex_webull.py | 6 +- examples/surrogates/ex_genz_bcs.py | 3 - examples/surrogates/ex_nn.py | 6 +- examples/surrogates/ex_nn_json.py | 6 +- pyproject.toml | 6 +- requirements.txt | 4 +- src/pytuq/ftools/gso.py | 5 +- src/pytuq/func/__init__.py | 1 + src/pytuq/func/bench.py | 352 +++ src/pytuq/func/bench1d.py | 531 ++++ src/pytuq/func/bench2d.py | 2342 ++++++++++++++++++ src/pytuq/func/benchNd.py | 529 ++++ src/pytuq/func/benchmark.py | 393 --- src/pytuq/func/chem.py | 43 +- src/pytuq/func/genz.py | 59 +- src/pytuq/func/oper.py | 130 +- src/pytuq/func/poly.py | 40 +- src/pytuq/func/toy.py | 124 +- src/pytuq/linred/klnn.py | 1 + src/pytuq/linred/klsurr.py | 2 + src/pytuq/lreg/anl.py | 1 - src/pytuq/utils/xutils.py | 26 + 70 files changed, 4341 insertions(+), 697 deletions(-) create mode 100755 examples/ex_funcall.py create mode 100755 src/pytuq/func/bench.py create mode 100755 src/pytuq/func/bench1d.py create mode 100755 src/pytuq/func/bench2d.py create mode 100755 src/pytuq/func/benchNd.py delete mode 100755 src/pytuq/func/benchmark.py mode change 100644 => 100755 src/pytuq/func/toy.py diff --git a/README.md b/README.md index 5981a96..4784112 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ Optional dependencies include: * pytorch (NN surrogates) * QUiNN (Quantification of Uncertainties in Neural Networks) * pyswarms (Particle Swarm Optimization) +* dill (for saving python objects) ## Installation @@ -71,6 +72,13 @@ $ pip install 'pytuq[dev]' $ pip install . ``` +## Contributors +Habib N. Najm (Sandia National Laboratories) +Javier Murgoitio-Esandi (Google) +Cosmin Safta (Sandia National Laboratories) +Joy Bahr-Mueller (Sandia National Laboratories) +Vahan Sargsyan (Stuyvesant High School) + ## License Distributed under BSD 3-Clause License. See `LICENSE.txt` for more information. 
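The reworked `apps/create_data.py` below assembles its catalog of benchmark functions automatically from the new `bench`, `bench1d`, `bench2d`, and `benchNd` modules and writes `xtrain.txt`/`ytrain.txt` (plus `gtrain.txt` when gradients are requested). A minimal sketch of the calls the script relies on, with the Ishigami function as an illustrative choice:

```python
import numpy as np
from pytuq.func.bench import Ishigami

fcn = Ishigami()               # 3d benchmark on [-pi, pi]^3
xx = fcn.sample_uniform(100)   # (100, fcn.dim) uniform samples from the function's domain
yy = fcn(xx)                   # (100, fcn.outdim) evaluations
gg = fcn.grad(xx)              # (100, fcn.outdim, fcn.dim) analytical gradients

np.savetxt('xtrain.txt', xx)
np.savetxt('ytrain.txt', yy)
np.savetxt('gtrain.txt', gg.reshape(xx.shape[0], -1))
```

The same data can be generated from the command line, e.g. `python apps/create_data.py -f Ishigami -n 100 -g`.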
diff --git a/apps/create_data.py b/apps/create_data.py index 8382efc..0daa441 100755 --- a/apps/create_data.py +++ b/apps/create_data.py @@ -5,9 +5,16 @@ import numpy as np import matplotlib.pyplot as plt -from pytuq.func import toy, genz, chem, benchmark, poly, oper, func +from pytuq.func import toy, genz, chem, bench, poly, oper, func +from pytuq.utils.xutils import instantiate_classes_from_module +fcn_dict = {} +for submod in ['bench', 'bench1d', 'bench2d', 'benchNd', 'chem', 'genz', 'poly', 'toy']: + this_objects = instantiate_classes_from_module(f"pytuq.func.{submod}") + for j in this_objects: + if j.name not in ['GenzBase', 'Poly']: + fcn_dict[j.name] = j usage_str = 'Script to create input-output data for benchmark functions.' @@ -16,29 +23,53 @@ # help="indices of requested parameters (count from 0)") parser.add_argument("-n", "--npts", dest="npts", type=int, default=100, help="Number of points") -parser.add_argument("-f", "--func", dest="func", type=str, default='lj', help="Function name", choices=['lj', 'mb']) - +parser.add_argument("-f", "--func", dest="func", type=str, default='Muller-Brown', help="Function name", choices=list(fcn_dict.keys())) +parser.add_argument("-x", "--xtrain", dest="xtrain_file", type=str, default=None, help="Optionally, provide x-sample file") +parser.add_argument("-s", "--sigma", dest="sig", type=float, default=0.0, help="Noise size") +parser.add_argument('-g', dest="grad", action='store_true', + help='Whether to compute gradients (default: False)') args = parser.parse_args() fname = args.func nsam = args.npts +grad = args.grad +sig = args.sig +xtrain_file = args.xtrain_file -if fname == 'lj': - fcn = chem.LennardJones() -elif fname == 'mb': - fcn = chem.MullerBrown() -else: - print(f'Function {fcn} is unknown. Please use -h to see the options. Exiting.') +try: + fcn = fcn_dict[fname] + print(f"{nsam} samples of function {fname} requested.") +except KeyError: + print(f'Function {fname} is unknown. Please use -h to see the options. 
Exiting.') sys.exit() -xx = fcn.sample_uniform(nsam) +if xtrain_file is None: + xx = fcn.sample_uniform(nsam) +else: + xx = np.loadtxt(xtrain_file) + if len(xx.shape)==1: + xx = xx.reshape(-1, 1) + print(f"Input data file {xtrain_file} has {xx.shape[0]} samples, ignoring the -n flag.") + +nsam, ndim = xx.shape +assert(ndim==fcn.dim) yy = fcn(xx) -gg = fcn.grad(xx) #nsam, nout, ndim -nsam_, nout, ndim = gg.shape -gg = gg.reshape(nsam, nout*ndim) +yy += sig * np.random.randn(*yy.shape) +print("Data noise sigma =", sig) +_, nout = yy.shape +assert(nout==fcn.outdim) + np.savetxt('xtrain.txt', xx) np.savetxt('ytrain.txt', yy) -np.savetxt('gtrain.txt', gg) +print("Input data saved to xtrain.txt with shape ", xx.shape) +print("Output data saved to ytrain.txt with shape ", yy.shape) +if grad: + gg = fcn.grad(xx) #nsam, nout, ndim + gg = gg.reshape(nsam, nout*ndim) + np.savetxt('gtrain.txt', gg) + print(f"Gradient data saved to gtrain.txt with shape ({nsam}, {nout}x{ndim})") + + diff --git a/apps/uqpc/uq_pc.py b/apps/uqpc/uq_pc.py index 8580eed..cbc9258 100755 --- a/apps/uqpc/uq_pc.py +++ b/apps/uqpc/uq_pc.py @@ -10,7 +10,7 @@ import numpy as np from pytuq.rv.pcrv import PCRV -from pytuq.func.benchmark import Ishigami +from pytuq.func.bench import Ishigami from pytuq.utils.xutils import savepk from pytuq.utils.mindex import get_mi, get_npc from pytuq.workflows.fits import pc_fit diff --git a/docs/auto_examples/auto_examples_jupyter.zip b/docs/auto_examples/auto_examples_jupyter.zip index 393eea6dcc0244c73c5c79ccc657265716d5aacd..0425fd690dda4b35b32ec841f87ae05296bb6564 100644 GIT binary patch delta 49 wcmdn9gK5tWCf)#VW)=|!5RjhUw2`+_m>EcKo+$jj9mJU2Jlh;h@10!_0Cn6D`Tzg` delta 49 wcmdn9gK5tWCf)#VW)=|!5O}t=Vk2*(Ff)+eJW=?4JBTs4dA2#2-aES<0J)|Vb^rhX diff --git a/docs/auto_examples/ex_nn.zip b/docs/auto_examples/ex_nn.zip index 5ab5e86108f494201164f2f827634e75e66f36c7..4feadc4727a719eb5fc456f035afb3acf203eafc 100644 GIT binary patch delta 24 dcmZqkYV+DKM}S#+dei3l0xBXP#^hrv4gheL2*LmW delta 24 dcmZqkYV+DKM}Ya+){4#Z1yn>pjLFAT8~}ui3V;9r diff --git a/docs/auto_examples/ex_pce.zip b/docs/auto_examples/ex_pce.zip index 9bdfbad1ae7134c1bc7864bb0ebffc705c9e8b65..68d919293e273cd51797647323fd32f6a9710c7e 100644 GIT binary patch delta 24 dcmeBi>v7w_&Ce`7y=gNa|1Mz=WAbSwM*w9l2s;1( delta 24 dcmeBi>v7w_&CmR7YsF?h{$0W##^lpVjsSTg3Hbm3 diff --git a/docs/misc/about.rst b/docs/misc/about.rst index 4ce79b9..666dfa1 100644 --- a/docs/misc/about.rst +++ b/docs/misc/about.rst @@ -12,7 +12,7 @@ The Python Toolkit for Uncertainty Quantification (PyTUQ) is a Python-only set o - Methods for Gaussian process regression - Global sensitivity analysis methods - SVD-based dimensionality reduction techniques -- Karhunen-Loeve expansions +- Karhunen-Loève expansions - Various methods for linear regression - Bayesian compressive sensing techniques - MCMC classes for calibration and parameter inference @@ -22,9 +22,18 @@ The Python Toolkit for Uncertainty Quantification (PyTUQ) is a Python-only set o Authors -------- -- Khachik Sargsyan -- Bert Debusschere -- Emilie Grace Baillo +- Khachik Sargsyan (Sandia National Laboratories) +- Bert Debusschere (Sandia National Laboratories) +- Emilie Grace Baillo (Sandia National Laboratories) + +Contributors +------------- +- Habib N. 
Najm (Sandia National Laboratories) +- Javier Murgoitio-Esandi (Google) +- Cosmin Safta (Sandia National Laboratories) +- Joy Bahr-Mueller (Sandia National Laboratories) +- Vahan Sargsyan (Stuyvesant High School) + Acknowledgements ----------------- diff --git a/examples/ex_bcs.py b/examples/ex_bcs.py index 5328446..13fc593 100755 --- a/examples/ex_bcs.py +++ b/examples/ex_bcs.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating Bayesian Compressive Sensing (BCS) for sparse polynomial chaos regression. -"""[summary] - -[description] +This script shows how to use BCS to construct a sparse polynomial chaos surrogate model +with specified multiindex and model data, comparing predictions with true function values. """ import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_bcs_mindex_growth.py b/examples/ex_bcs_mindex_growth.py index 63189ad..668127a 100755 --- a/examples/ex_bcs_mindex_growth.py +++ b/examples/ex_bcs_mindex_growth.py @@ -1,15 +1,18 @@ #!/usr/bin/env python -"""An example demonstrating bcs and multiindex growth. +"""Example demonstrating adaptive multiindex growth with Bayesian Compressive Sensing. + +This script shows how to iteratively grow a polynomial chaos surrogate using +adaptive multiindex selection and BCS regression for sparse approximation. """ import numpy as np from pytuq.rv.pcrv import PCRV from pytuq.utils.mindex import get_mi -from pytuq.utils.plotting import myrc, lighten_color, plot_dm, plot_jsens +from pytuq.utils.plotting import myrc, plot_dm from pytuq.utils.maps import scale01ToDom from pytuq.lreg.bcs import bcs -from pytuq.utils.mindex import mi_addfront_cons, mi_addfront +from pytuq.utils.mindex import mi_addfront import pytuq.utils.funcbank as fcb diff --git a/examples/ex_colors.py b/examples/ex_colors.py index 3a3ea70..ab2f339 100755 --- a/examples/ex_colors.py +++ b/examples/ex_colors.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""Example that creates RGB triples.""" +"""Example demonstrating color palette generation and visualization. + +This script creates and displays a set of RGB color triples using PyTUQ's +plotting utilities, useful for creating consistent color schemes in plots. +""" from matplotlib import pyplot as plt diff --git a/examples/ex_evidence.py b/examples/ex_evidence.py index 2163362..bdfa1c9 100755 --- a/examples/ex_evidence.py +++ b/examples/ex_evidence.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating model selection using Bayesian evidence computation. -"""[summary] - -[description] +This script compares different models using analytical linear regression (ANL) and +computes evidence values to determine which model best fits the data. """ import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_func.py b/examples/ex_func.py index 4b185c3..85b8020 100755 --- a/examples/ex_func.py +++ b/examples/ex_func.py @@ -1,25 +1,23 @@ #!/usr/bin/env python +"""Example demonstrating function composition and operations with PyTUQ function classes. -"""[summary] - -[description] +This script shows how to combine, transform, and manipulate various function objects +including toy functions, Genz functions, chemistry functions, and benchmark functions. 
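+
+For example, function objects can be combined with arithmetic operators and with
+wrappers from pytuq.func.oper (a sketch of constructions exercised below)::
+
+    from pytuq.func import bench2d, chem, genz, oper, toy
+
+    prod  = bench2d.Adjiman() * bench2d.Branin()              # product of two 2d benchmarks
+    shift = oper.PickDim(2, 1, cf=100.) + chem.MullerBrown()  # sum of two functions
+    dfdx  = oper.GradFcn(bench2d.Adjiman(), 1)                # derivative w.r.t. dimension 1
+    comp  = oper.ComposeFcn(toy.Exp(), genz.GenzOscillatory(), name='Composite1d')  # composition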
""" -import sys import numpy as np from pytuq.utils.mindex import get_mi, get_npc -from pytuq.func import toy, genz, chem, benchmark, poly, oper, func +from pytuq.func import toy, genz, chem, bench2d, bench, benchNd, poly, oper, func from pytuq.utils.plotting import myrc myrc() - fcns = [ func.ModelWrapperFcn(lambda x,p : x[:,0]**p[0]+np.sin(x[:,1]**p[0]), 3, modelpar=[3]), \ oper.PickDim(2, 1, cf=100.)+chem.MullerBrown(),\ oper.PickDim(2, 1, cf=1.)-chem.MullerBrown(),\ - benchmark.Adjiman()*benchmark.Branin(), \ + bench2d.Adjiman()*bench2d.Branin(), \ oper.PickDim(2, 1, cf=1.) / (toy.Constant(2,np.ones(1,)) + oper.PickDim(2, 0, cf=1.)), \ oper.PickDim(2, 1, cf=100.)**3, \ toy.Quad(),\ @@ -33,15 +31,11 @@ genz.GenzCornerPeak(weights=[7., 2.]),\ chem.MullerBrown(),\ chem.LennardJones(),\ - benchmark.Sobol(dim=3),\ - benchmark.Franke(),\ - benchmark.Ishigami(),\ - benchmark.NegAlpineN2(),\ - benchmark.Adjiman(),\ - benchmark.Branin(),\ - benchmark.SumSquares(),\ - benchmark.Quadratic([-1., 2.], [[2., -1.], [-1., 1.]]),\ - benchmark.MVN([-1., 2.], [[2., -1.], [-1., 1.]]),\ + benchNd.Sobol(dim=3),\ + bench.Ishigami(),\ + benchNd.NegAlpineN2(),\ + benchNd.SumSquares(),\ + benchNd.MVN([-1., 2.], [[2., -1.], [-1., 1.]]),\ poly.Leg(get_mi(4,3), np.ones((get_npc(4, 3),))),\ poly.Mon(get_mi(4,3), np.ones((get_npc(4, 3),))),\ oper.CartesProdFcn(toy.Identity(1),toy.Identity(1)), \ @@ -50,8 +44,8 @@ oper.SliceFcn(chem.MullerBrown(), ind=[0,1]),\ oper.ComposeFcn(toy.Identity(2), genz.GenzOscillatory(weights=[-3., -1.])),\ oper.ComposeFcn(toy.Exp(), genz.GenzOscillatory(), name='Composite1d'),\ - oper.GradFcn(benchmark.Adjiman(), 1), \ - oper.GradFcn(benchmark.Franke(), 1), \ + oper.GradFcn(bench2d.Adjiman(), 1), \ + oper.GradFcn(bench2d.Franke(), 1), \ oper.PickDim(2, 1) ] diff --git a/examples/ex_funcall.py b/examples/ex_funcall.py new file mode 100755 index 0000000..19be5ff --- /dev/null +++ b/examples/ex_funcall.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +"""Example demonstrating automatic instantiation and testing of all PyTUQ function classes. + +This script automatically creates instances of all available function classes from +PyTUQ's function modules and validates their gradient implementations. +""" + +import numpy as np + +from pytuq.utils.plotting import myrc +from pytuq.utils.xutils import instantiate_classes_from_module +myrc() + +objects = [] +for submod in ['bench', 'bench1d', 'bench2d', 'benchNd', 'chem', 'genz', 'poly', 'toy']: + this_objects = instantiate_classes_from_module(f"pytuq.func.{submod}") + for j in this_objects: + if j.name not in ['GenzBase', 'Poly']: + objects.append(j) + +#print("Created instances:", objects) +for fcn in objects: + + print(f"========== Function {fcn.name} ==================") + print(fcn.name, "->", fcn) + + + print("Gradient check") + x = np.random.rand(111, fcn.dim) + assert(np.allclose(fcn.grad_(x, eps=1.e-8), fcn.grad(x), atol=1.e-5, rtol=1.e-3)) + + # print("Minimize") + # xmin = fcn.minimize() + # print(f"Minimum is at {xmin}") + + print(f"Domain is {fcn.domain}") + + nom = fcn.sample_uniform(1)[0] + print("Plotting 1d slice") + fcn.plot_1d(ngr=100, nom=nom) + + if fcn.dim>1: + print("Plotting 2d slice") + fcn.plot_2d(ngr=52, nom=nom) diff --git a/examples/ex_funcgrad.py b/examples/ex_funcgrad.py index e4070fc..792a02f 100755 --- a/examples/ex_funcgrad.py +++ b/examples/ex_funcgrad.py @@ -1,16 +1,14 @@ #!/usr/bin/env python +"""Example demonstrating gradient checking and evaluation for benchmark functions. 
-"""[summary] - -[description] +This script tests the analytical gradients of functions against numerical gradients, +and visualizes function values and derivatives. """ -import sys import numpy as np from matplotlib import pyplot as plt -from pytuq.utils.mindex import get_mi, get_npc -from pytuq.func import toy, genz, chem, benchmark, poly, oper, func +from pytuq.func import chem from pytuq.utils.plotting import myrc myrc() diff --git a/examples/ex_genz1d.py b/examples/ex_genz1d.py index 1479c14..472716e 100755 --- a/examples/ex_genz1d.py +++ b/examples/ex_genz1d.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating 1D Genz test functions. -"""[summary] - -[description] +This script evaluates and plots various 1D Genz functions including oscillatory, +corner peak, and sum functions across their domains. """ import numpy as np from matplotlib import pyplot as plt diff --git a/examples/ex_gmm.py b/examples/ex_gmm.py index 6140571..701d4b2 100755 --- a/examples/ex_gmm.py +++ b/examples/ex_gmm.py @@ -1,9 +1,12 @@ #!/usr/bin/env python -"""Test script for gmm.""" +"""Example demonstrating Gaussian Mixture Model (GMM) sampling and visualization. + +This script creates a GMM with multiple components, samples from it within +a specified domain, and visualizes the samples and probability density. +""" import numpy as np import matplotlib.pyplot as plt -from scipy.stats import truncnorm, multivariate_normal from pytuq.rv.mrv import GMM from pytuq.utils.plotting import plot_pdfs, plot_xrv diff --git a/examples/ex_gp.py b/examples/ex_gp.py index 4222e31..f2b94bc 100755 --- a/examples/ex_gp.py +++ b/examples/ex_gp.py @@ -1,10 +1,13 @@ #!/usr/bin/env python -"""A Gaussian Process fit example.""" +"""Example demonstrating Gaussian Process regression for function approximation. + +This script builds a Gaussian Process surrogate model from training data, +performs hyperparameter optimization, and evaluates prediction accuracy. +""" import numpy as np import matplotlib.pyplot as plt -from scipy.optimize import minimize from pytuq.fit.gp import gp from pytuq.rv.pcrv import PCRV diff --git a/examples/ex_gsa.py b/examples/ex_gsa.py index a41c899..01c08ef 100755 --- a/examples/ex_gsa.py +++ b/examples/ex_gsa.py @@ -1,7 +1,11 @@ #!/usr/bin/env python +"""Example demonstrating global sensitivity analysis using Sobol indices. +This script computes main and total Sobol sensitivity indices using either +sampling-based (SamSobol) or PC-based (PCSobol) methods for a test function. +""" -import os, sys +import sys import numpy as np try: import pprint @@ -9,7 +13,6 @@ print("Please pip install pprint for more readable printing.") -from pytuq.func.benchmark import Ishigami from pytuq.gsa.gsa import SamSobol, PCSobol from pytuq.utils.plotting import plot_sens, plot_jsens, myrc diff --git a/examples/ex_gsa_multi.py b/examples/ex_gsa_multi.py index a5b8ab4..f4332bb 100755 --- a/examples/ex_gsa_multi.py +++ b/examples/ex_gsa_multi.py @@ -1,17 +1,15 @@ #!/usr/bin/env python +"""Example demonstrating global sensitivity analysis for multi-output models. +This script performs Sobol sensitivity analysis on a simple multi-output model +to compute main and total sensitivity indices for each output dimension. 
+""" -import os, sys import numpy as np -try: - import pprint -except ModuleNotFoundError: - print("Please pip install pprint for more readable printing.") -from pytuq.func.benchmark import Ishigami -from pytuq.gsa.gsa import SamSobol, PCSobol, model_sens -from pytuq.utils.plotting import plot_sens, plot_jsens, myrc +from pytuq.gsa.gsa import model_sens +from pytuq.utils.plotting import myrc myrc() diff --git a/examples/ex_gso.py b/examples/ex_gso.py index 90d61ff..d1e2c36 100755 --- a/examples/ex_gso.py +++ b/examples/ex_gso.py @@ -1,5 +1,8 @@ #!/usr/bin/env python -"""Example for testing Multistage Modified Gram-Schmidt (MMGS) orthogonalization for functions.""" +"""Example for testing Multistage Modified Gram-Schmidt (MMGS) orthogonalization for functions. + +Written by Habib N. Najm (2025). +""" import sys import argparse diff --git a/examples/ex_integrate.py b/examples/ex_integrate.py index 470fecb..ccf0bcc 100755 --- a/examples/ex_integrate.py +++ b/examples/ex_integrate.py @@ -1,4 +1,9 @@ #!/usr/bin/env python +"""Example demonstrating numerical integration of Gaussian functions. + +This script tests various integration methods on single and double Gaussian +functions, comparing numerical results with analytical solutions. +""" import numpy as np from pytuq.ftools.integr import * diff --git a/examples/ex_kl.py b/examples/ex_kl.py index 88190f3..63aca71 100755 --- a/examples/ex_kl.py +++ b/examples/ex_kl.py @@ -1,11 +1,12 @@ #!/usr/bin/env python +"""Example demonstrating Karhunen-Loève Expansion (KLE) and SVD for dimensionality reduction. +This script builds KLE or SVD representations of model output data to capture +variance with reduced dimensionality. +""" -import sys import numpy as np -import matplotlib.pyplot as plt -from pytuq.linred.kle import KLE from pytuq.linred.svd import SVD diff --git a/examples/ex_klpc.py b/examples/ex_klpc.py index 7235a85..45c1e1c 100755 --- a/examples/ex_klpc.py +++ b/examples/ex_klpc.py @@ -1,7 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating KLE combined with Polynomial Chaos for high-dimensional output approximation. +This script uses Karhunen-Loève Expansion to reduce output dimensionality, then builds +PC surrogates for the reduced modes to efficiently represent high-dimensional model outputs. +""" -import sys import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_lreg.py b/examples/ex_lreg.py index 60003cc..e87f80e 100755 --- a/examples/ex_lreg.py +++ b/examples/ex_lreg.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating polynomial chaos linear regression methods. -"""[summary] - -[description] +This script compares various linear regression techniques (LSQ, ANL, OPT, MERR) +for constructing polynomial chaos surrogates and evaluates their performance. """ import numpy as np import matplotlib.pyplot as plt @@ -12,10 +12,7 @@ from pytuq.utils.mindex import get_mi from pytuq.utils.plotting import myrc, lighten_color, plot_dm from pytuq.utils.maps import scale01ToDom -from pytuq.lreg.merr import lreg_merr from pytuq.lreg.anl import anl -from pytuq.lreg.opt import opt -from pytuq.lreg.lreg import lsq import pytuq.utils.funcbank as fcb myrc() diff --git a/examples/ex_lreg_basiseval.py b/examples/ex_lreg_basiseval.py index 752146b..f731ef4 100755 --- a/examples/ex_lreg_basiseval.py +++ b/examples/ex_lreg_basiseval.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating polynomial chaos basis evaluation and regression methods. 
-"""[summary] - -[description] +This script compares different regression methods (least squares, analytical, optimization) +for polynomial chaos surrogate construction and evaluates basis function efficiency. """ import numpy as np import matplotlib.pyplot as plt @@ -12,10 +12,7 @@ from pytuq.utils.mindex import get_mi from pytuq.utils.plotting import myrc, lighten_color, plot_dm, plot_vars from pytuq.utils.maps import scale01ToDom -from pytuq.lreg.merr import lreg_merr from pytuq.lreg.anl import anl -from pytuq.lreg.opt import opt -from pytuq.lreg.lreg import lsq import pytuq.utils.funcbank as fcb diff --git a/examples/ex_lreg_merr.py b/examples/ex_lreg_merr.py index 15b1ad4..7d7d4a8 100755 --- a/examples/ex_lreg_merr.py +++ b/examples/ex_lreg_merr.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating linear regression with measurement error. -"""[summary] - -[description] +This script shows how to perform polynomial chaos regression accounting for +measurement errors in the data using the MERR (Measurement Error in Regression) method. """ import numpy as np import matplotlib.pyplot as plt @@ -13,8 +13,6 @@ from pytuq.utils.plotting import myrc, lighten_color from pytuq.utils.maps import scale01ToDom from pytuq.lreg.merr import lreg_merr -from pytuq.lreg.anl import anl -import pytuq.utils.funcbank as fcb myrc() diff --git a/examples/ex_mcmc_banana.py b/examples/ex_mcmc_banana.py index 142a8e3..bd82b3d 100755 --- a/examples/ex_mcmc_banana.py +++ b/examples/ex_mcmc_banana.py @@ -1,9 +1,12 @@ #!/usr/bin/env python +"""Example demonstrating MCMC sampling for a banana-shaped (Rosenbrock) distribution. + +This script compares different MCMC methods (AMCMC, HMC, MALA) for sampling from +a challenging banana-shaped posterior distribution. +""" -import sys import numpy as np -import matplotlib.pyplot as plt from scipy.stats import multivariate_normal from pytuq.minf.mcmc import AMCMC, HMC,MALA diff --git a/examples/ex_mcmc_fitline.py b/examples/ex_mcmc_fitline.py index b6a76d1..1db3bc9 100755 --- a/examples/ex_mcmc_fitline.py +++ b/examples/ex_mcmc_fitline.py @@ -1,7 +1,11 @@ #!/usr/bin/env python +"""Example demonstrating MCMC-based Bayesian linear model calibration. + +This script uses Adaptive MCMC to calibrate a linear model to noisy data, +inferring posterior distributions of the model parameters and observation noise. +""" import sys -import torch import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_mcmc_fitmodel.py b/examples/ex_mcmc_fitmodel.py index 8b7ac6a..3309efc 100755 --- a/examples/ex_mcmc_fitmodel.py +++ b/examples/ex_mcmc_fitmodel.py @@ -1,7 +1,11 @@ #!/usr/bin/env python +"""Example demonstrating MCMC-based Bayesian calibration of a multivariate linear model. + +This script uses Adaptive MCMC to infer parameters of a linear model with +multiple features, including bias and weight parameters. +""" import sys -import torch import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_mfvi.py b/examples/ex_mfvi.py index e1940ab..15297f9 100755 --- a/examples/ex_mfvi.py +++ b/examples/ex_mfvi.py @@ -1,18 +1,21 @@ #!/usr/bin/env python +"""Example demonstrating Mean-Field Variational Inference (MFVI) for Bayesian inference. + +This script uses MFVI with different optimization methods (PSO, Scipy) to approximate +posterior distributions for parameters in a simple model. 
+""" import numpy as np #from autograd import grad import matplotlib.pyplot as plt -from scipy.special import erf from pytuq.minf.vi import MFVI from pytuq.optim.pso import PSO -from pytuq.optim.sciwrap import ScipyWrapper from pytuq.func.func import Function -from pytuq.func.benchmark import TFData +from pytuq.func.bench1d import TFData from pytuq.utils.maps import scale01ToDom from pytuq.utils.plotting import plot_dm, myrc diff --git a/examples/ex_mindex.py b/examples/ex_mindex.py index f77590d..6eb471b 100755 --- a/examples/ex_mindex.py +++ b/examples/ex_mindex.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""Example of multiindex manipulations.""" +"""Example demonstrating multiindex generation and encoding operations. + +This script shows how to generate polynomial chaos multiindices and encode +them for efficient storage and manipulation. +""" from pytuq.utils.mindex import get_mi, encode_mindex diff --git a/examples/ex_minf.py b/examples/ex_minf.py index 154edd2..967160c 100755 --- a/examples/ex_minf.py +++ b/examples/ex_minf.py @@ -1,15 +1,23 @@ #!/usr/bin/env python +"""Example demonstrating model inference workflows for parameter estimation. + +This script shows different approaches to Bayesian parameter inference including +optimization-based and sampling-based methods for model calibration. +""" -import sys import torch import numpy as np - import pytuq.minf.minf as minf import pytuq.gsa.gsa as gsa try: - from quinn.nns.nnwrap import nn_p, nnwrapper + import pprint +except ModuleNotFoundError: + print("Please pip install pprint for more readable printing.") + +try: + from quinn.nns.nnwrap import nn_p except ImportError: print("Warning: QUiNN not installed. NN functionality won't work.") diff --git a/examples/ex_minf_sketch.py b/examples/ex_minf_sketch.py index ae47c14..1118f93 100755 --- a/examples/ex_minf_sketch.py +++ b/examples/ex_minf_sketch.py @@ -7,7 +7,6 @@ print("Please pip install pprint for more readable printing.") import numpy as np -import matplotlib.pyplot as plt import pytuq.func.func as func import pytuq.minf.minf as minf diff --git a/examples/ex_mixture.py b/examples/ex_mixture.py index a448f47..18319ec 100755 --- a/examples/ex_mixture.py +++ b/examples/ex_mixture.py @@ -1,8 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating mixture distributions with Weibull and Gaussian components. +This script creates and samples from a mixture distribution combining Weibull and +multivariate normal distributions with specified weights. +""" -import os -import sys import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_mrv.py b/examples/ex_mrv.py index b3b14ec..199ad20 100755 --- a/examples/ex_mrv.py +++ b/examples/ex_mrv.py @@ -1,10 +1,13 @@ #!/usr/bin/env python +"""Example demonstrating multivariate random variable (MRV) operations. +This script shows how to create and manipulate polynomial chaos random variables +including independent and multivariate normal PC random variables. +""" import numpy as np from pytuq.rv.pcrv import PCRV_iid, PCRV_mvn -from pytuq.utils.mindex import get_mi diff --git a/examples/ex_optim.py b/examples/ex_optim.py index d21d1f6..d4274d0 100755 --- a/examples/ex_optim.py +++ b/examples/ex_optim.py @@ -1,4 +1,9 @@ #!/usr/bin/env python +"""Example demonstrating optimization algorithms on the Rosenbrock function. + +This script compares different optimization methods (Gradient Descent, Adam, PSO, Scipy) +for minimizing the Rosenbrock function. 
+""" import sys import numpy as np diff --git a/examples/ex_pcbasis1d.py b/examples/ex_pcbasis1d.py index f66c676..c07f284 100755 --- a/examples/ex_pcbasis1d.py +++ b/examples/ex_pcbasis1d.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""A""" +"""Example demonstrating 1D polynomial chaos basis evaluation and plotting. + +This script evaluates and plots Hermite polynomial basis functions of various +orders to illustrate orthogonal polynomial behavior. +""" import numpy as np import matplotlib.pyplot as plt diff --git a/examples/ex_pcgsa.py b/examples/ex_pcgsa.py index ae91a85..49f8a05 100755 --- a/examples/ex_pcgsa.py +++ b/examples/ex_pcgsa.py @@ -1,17 +1,19 @@ #!/usr/bin/env python +"""Example demonstrating PC-based global sensitivity analysis. + +This script computes Sobol sensitivity indices using polynomial chaos expansions, +optionally building a PC surrogate first or directly using the Ishigami function. +""" -import os, sys try: import pprint except ModuleNotFoundError: print("Please pip install pprint for more readable printing.") -import numpy as np -from matplotlib import pyplot as plt from pytuq.lreg.lreg import lsq -from pytuq.func.benchmark import Ishigami -from pytuq.gsa.gsa import SamSobol, PCSobol +from pytuq.func.bench import Ishigami +from pytuq.gsa.gsa import PCSobol from pytuq.utils.plotting import plot_sens, plot_jsens, myrc myrc() diff --git a/examples/ex_pcrv.py b/examples/ex_pcrv.py index e8e15c6..d36d06b 100755 --- a/examples/ex_pcrv.py +++ b/examples/ex_pcrv.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating polynomial chaos random variable slicing operations. -"""[summary] - -[description] +This script shows how to slice a PC random variable by fixing certain dimensions +at nominal values to obtain a reduced-dimension PCRV. """ import numpy as np diff --git a/examples/ex_pcrv1.py b/examples/ex_pcrv1.py index 44bcdb8..1181fa8 100755 --- a/examples/ex_pcrv1.py +++ b/examples/ex_pcrv1.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""A test for some operations with the PCRV class.""" +"""Example demonstrating PCRV compression and random dimension selection. + +This script creates a multivariate normal PCRV with specified random dimensions, +samples from it, and demonstrates PC compression operations. +""" import numpy as np diff --git a/examples/ex_pcrv2.py b/examples/ex_pcrv2.py index 03130b0..d533c77 100755 --- a/examples/ex_pcrv2.py +++ b/examples/ex_pcrv2.py @@ -1,6 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating basic polynomial chaos random variable operations. + +This script creates a PCRV with random coefficients and demonstrates computing +statistics (mean, variance), basis norms, and sampling. +""" -import sys import numpy as np from pytuq.rv.pcrv import PCRV diff --git a/examples/ex_pcrv_mvn.py b/examples/ex_pcrv_mvn.py index 349615f..61a99e4 100755 --- a/examples/ex_pcrv_mvn.py +++ b/examples/ex_pcrv_mvn.py @@ -1,10 +1,13 @@ #!/usr/bin/env python +"""Example demonstrating multivariate normal polynomial chaos random variables. +This script creates PCRV_mvn objects with specified means and covariances, +and generates samples from the multivariate normal distribution. 
+""" import numpy as np -from pytuq.rv.pcrv import PCRV_iid, PCRV_mvn -from pytuq.utils.mindex import get_mi +from pytuq.rv.pcrv import PCRV_mvn covMatSize=14 #dimension of L and C diff --git a/examples/ex_plot_fcn.py b/examples/ex_plot_fcn.py index 85cd1a9..a324f82 100755 --- a/examples/ex_plot_fcn.py +++ b/examples/ex_plot_fcn.py @@ -1,5 +1,9 @@ #!/usr/bin/env python +"""Example demonstrating anchored 1D and 2D function plotting utilities. +This script shows how to use plotting utilities to visualize model functions +with respect to one or two parameters while fixing others at nominal values. +""" import numpy as np diff --git a/examples/ex_quad.py b/examples/ex_quad.py index 373cb34..59dac89 100755 --- a/examples/ex_quad.py +++ b/examples/ex_quad.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""A test for sampling routines.""" +"""Example demonstrating quadrature point generation for PC germ variables. + +This script generates and visualizes quadrature points for polynomial chaos +germ variables using tensor product quadrature rules. +""" import matplotlib.pyplot as plt diff --git a/examples/ex_sampling.py b/examples/ex_sampling.py index e5b3107..9cc76e1 100755 --- a/examples/ex_sampling.py +++ b/examples/ex_sampling.py @@ -1,5 +1,9 @@ #!/usr/bin/env python -"""A test for sampling routines.""" +"""Example demonstrating domain-constrained sampling from Gaussian Mixture Models. + +This script samples from a GMM within a specified domain and visualizes +the resulting samples and probability densities. +""" import numpy as np diff --git a/examples/ex_slice.py b/examples/ex_slice.py index 719ad9e..675d067 100755 --- a/examples/ex_slice.py +++ b/examples/ex_slice.py @@ -1,8 +1,8 @@ #!/usr/bin/env python +"""Example demonstrating 2D function slicing and visualization. -"""[summary] - -[description] +This script shows how to plot 2D slices of functions at various anchor points, +useful for exploring function behavior around specific parameter values. """ import numpy as np diff --git a/examples/ex_uprop.py b/examples/ex_uprop.py index 87ca6b3..199497a 100755 --- a/examples/ex_uprop.py +++ b/examples/ex_uprop.py @@ -1,6 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating uncertainty propagation through a model with PC inputs. + +This script shows how to propagate polynomial chaos input uncertainties through +a nonlinear model using projection or regression methods. +""" -import os import numpy as np from pytuq.rv.pcrv import PCRV, PCRV_iid diff --git a/examples/ex_uprop2.py b/examples/ex_uprop2.py index 2b93131..9cce6b9 100755 --- a/examples/ex_uprop2.py +++ b/examples/ex_uprop2.py @@ -1,6 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating uncertainty propagation workflows using projection and regression. + +This script compares projection-based and regression-based methods for propagating +PC input uncertainties through nonlinear forward models. +""" -import sys import numpy as np from pytuq.rv.pcrv import PCRV diff --git a/examples/ex_webull.py b/examples/ex_webull.py index 33ba78f..6885ed8 100755 --- a/examples/ex_webull.py +++ b/examples/ex_webull.py @@ -1,8 +1,10 @@ #!/usr/bin/env python +"""Example demonstrating Weibull distribution and mixture models. +This script creates Weibull distributions and mixtures with Gaussian components, +demonstrating sampling from complex mixed distributions. 
+""" -import os -import sys import numpy as np import matplotlib.pyplot as plt diff --git a/examples/surrogates/ex_genz_bcs.py b/examples/surrogates/ex_genz_bcs.py index e186469..d523746 100644 --- a/examples/surrogates/ex_genz_bcs.py +++ b/examples/surrogates/ex_genz_bcs.py @@ -28,13 +28,10 @@ """ # %% -import os -import sys import numpy as np import copy import math -import pytuq.utils.funcbank as fcb from matplotlib import pyplot as plt from sklearn.metrics import root_mean_squared_error diff --git a/examples/surrogates/ex_nn.py b/examples/surrogates/ex_nn.py index db32364..893ee5c 100644 --- a/examples/surrogates/ex_nn.py +++ b/examples/surrogates/ex_nn.py @@ -7,16 +7,14 @@ The ``build()`` and ``evaluate()`` functions similarly accept dictionaries and explicit keyword arguments during their respective function calls. """ -import sys import torch import numpy as np from pytuq.surrogates.nn import NN -from quinn.solvers.nn_vi import NN_VI -from quinn.nns.rnet import RNet, Poly +from quinn.nns.rnet import Poly from quinn.utils.plotting import myrc from quinn.utils.maps import scale01ToDom -from quinn.func.funcs import Sine, Sine10, blundell +from quinn.func.funcs import Sine def main(): """Main function.""" diff --git a/examples/surrogates/ex_nn_json.py b/examples/surrogates/ex_nn_json.py index 3fb2ca3..29454c2 100644 --- a/examples/surrogates/ex_nn_json.py +++ b/examples/surrogates/ex_nn_json.py @@ -9,17 +9,13 @@ When requested with a provided filename, the updated options are printed out to a json file. """ -import sys import torch -import json import numpy as np from pytuq.surrogates.nn import NN -from quinn.solvers.nn_vi import NN_VI -from quinn.nns.rnet import RNet, Poly from quinn.utils.plotting import myrc from quinn.utils.maps import scale01ToDom -from quinn.func.funcs import Sine, Sine10, blundell +from quinn.func.funcs import Sine def main(): """Main function.""" diff --git a/pyproject.toml b/pyproject.toml index ec15d89..0781ef9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,10 +55,14 @@ nn = [ optim = [ "pyswarms" ] +utils = [ + "dill" +] all = [ "torch", "uqinn", - "pyswarms" + "pyswarms", + "dill" ] diff --git a/requirements.txt b/requirements.txt index 687e276..bba1ac9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,7 @@ numpy<2 scipy -torch matplotlib +dill +pyswarms +torch git+https://github.com/sandialabs/quinn.git#egg=QUINN \ No newline at end of file diff --git a/src/pytuq/ftools/gso.py b/src/pytuq/ftools/gso.py index daaf58b..72c99af 100755 --- a/src/pytuq/ftools/gso.py +++ b/src/pytuq/ftools/gso.py @@ -1,5 +1,8 @@ #!/usr/bin/env python -"""Module for Gram-Schmidt orthogonalization for functions.""" +"""Module for Gram-Schmidt orthogonalization for functions. + +Written by Habib N. Najm (2025). +""" import sys import time diff --git a/src/pytuq/func/__init__.py b/src/pytuq/func/__init__.py index 4265cc3..cf529d7 100755 --- a/src/pytuq/func/__init__.py +++ b/src/pytuq/func/__init__.py @@ -1 +1,2 @@ #!/usr/bin/env python + diff --git a/src/pytuq/func/bench.py b/src/pytuq/func/bench.py new file mode 100755 index 0000000..60960f0 --- /dev/null +++ b/src/pytuq/func/bench.py @@ -0,0 +1,352 @@ +#!/usr/bin/env python +""" +Benchmark functions module that are not 1d, 2d, or Nd. + +Most of the functions are taken from https://github.com/Vahanosi4ek/pytuq_funcs that autogenerates the codes given function's latex strings. 
+""" +import sys +import numpy as np +from scipy.stats import multivariate_normal + +from .func import Function + + +################################################################################ +################################################################################ +################################################################################ + + +class Ishigami(Function): + r"""Ishigami function + + Reference: [https://www.sfu.ca/~ssurjano/ishigami.html] + + .. math:: + f(x) = \sin(x_1) + a \sin^2(x_2) + b x_3^4 \sin(x_1) + + Default constant values are :math:`a = 7` and :math:`b = 0.1` + + """ + def __init__(self, name='Ishigami'): + super().__init__() + self.name = name + self.dim = 3 + self.outdim = 1 + + + self.setDimDom(np.ones((self.dim, 1)) * np.array([-np.pi, np.pi])) + self.a = 7 + self.b = 0.1 + + + def __call__(self, x): + sam = x.shape[0] + self.checkDim(x) + + ydata=np.empty((sam, self.outdim)) + + for j in range(sam): + ydata[j, 0]=np.sin(x[j,0])+self.a*np.sin(x[j,1])**2+self.b*np.sin(x[j,0])*x[j,2]**4 + + return ydata + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + grad[:,0,0] = np.cos(x[:,0]) + self.b * np.cos(x[:,0]) * x[:,2]**4 + grad[:,0,1] = 2. * self.a * np.sin(x[:,1]) * np.cos(x[:,1]) + grad[:,0,2] = 4. * self.b * np.sin(x[:,0]) * x[:,2]**3 + + return grad + + +################################################################################ +################################################################################ +################################################################################ + + +class Wolfe(Function): + r""" + Wolfe function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_W.html#go_benchmark.Wolfe] + + .. math:: + f(x)=c_1(x_1^2 + x_2^2 - x_1x_2)^{c_2} + x_3 + + + Default constant values are :math:`c = (4/3, 0.75)` + + """ + def __init__(self, c1=4./3., c2=0.75, name="Wolfe"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 3 + self.outdim = 1 + + self.setDimDom(domain=np.array([[0, 2], [0, 2], [0, 2]])) + + def __call__(self, x): + return ((self.c1*((x[:, 0]**2+x[:, 1]**2)-(x[:, 0]*x[:, 1]))**self.c2)+x[:, 2]).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = self.c1*(((x[:, 0]**2+x[:, 1]**2)-(x[:, 0]*x[:, 1]))**self.c2*(self.c2*((1/((x[:, 0]**2+x[:, 1]**2)-(x[:, 0]*x[:, 1])))*((x[:, 0]**2*(2*(1/x[:, 0])))-x[:, 1])))) + grad[:, 0, 1] = self.c1*(((x[:, 0]**2+x[:, 1]**2)-(x[:, 0]*x[:, 1]))**self.c2*(self.c2*((1/((x[:, 0]**2+x[:, 1]**2)-(x[:, 0]*x[:, 1])))*((x[:, 1]**2*(2*(1/x[:, 1])))-x[:, 0])))) + grad[:, 0, 2] = 1 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Colville(Function): + r""" + Colville function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.Colville] + + .. 
math:: + f(x)=(x_1 - c_1)^{2} + c_2 (x_1^{2} - x_2)^{2} + c_3 (x_2 - c_4)^{2} + (x_3 - c_5)^{2} + c_6 (x_3^{2} - x_4)^{2} + c_7 (x_4 - c_8)^{2} + c_9 \frac{x_4 - c_{10}}{x_2} + + + Default constant values are :math:`c = (1.0, 100.0, 10.1, 1.0, 1.0, 90.0, 10.1, 1.0, 19.8, 1.0)` + + """ + def __init__(self, c1=1.0, c2=100.0, c3=10.1, c4=1.0, c5=1.0, c6=90.0, c7=10.1, c8=1.0, c9=19.8, c10=1.0, name="Colville"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8, self.c9, self.c10 = c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 + self.dim = 4 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]])) + + def __call__(self, x): + return ((x[:, 0]-self.c1)**(2.0)+self.c2*(x[:, 0]**(2.0)-x[:, 1])**(2.0)+self.c3*(x[:, 1]-self.c4)**(2.0)+(x[:, 2]-self.c5)**(2.0)+self.c6*(x[:, 2]**(2.0)-x[:, 3])**(2.0)+self.c7*(x[:, 3]-self.c8)**(2.0)+self.c9*(x[:, 3]-self.c10)/(x[:, 1])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = 2.0*(x[:, 0]-self.c1)+self.c2*2.0*(x[:, 0]**2.0-x[:, 1])*2.0*x[:, 0] + grad[:, 0, 1] = -2.0*self.c2*(x[:, 0]**2.0-x[:, 1])+2.0*self.c3*(x[:, 1]-self.c4)-(self.c9*(x[:, 3]-self.c10))/(x[:, 1]**2.0) + grad[:, 0, 2] = 2.0*(x[:, 2]-self.c5)+2.0*self.c6*(x[:, 2]**2.0-x[:, 3])*2.0*x[:, 2] + grad[:, 0, 3] = -2.0*self.c6*(x[:, 2]**2.0-x[:, 3])+self.c7*2.0*(x[:, 3]-self.c8)+(self.c9)/(x[:, 1]) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + + +class MieleCantrell(Function): + r""" + MieleCantrell function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.MieleCantrell] + + A multimodal minimization function + + .. math:: + f(x)=(e^{-x_1} - x_2)^4 + c_1(x_2 - x_3)^6 + \tan^4(x_3 - x_4) + x_1^8 + + + Default constant values are :math:`c_1 = 100.0` + """ + def __init__(self, c1=100.0, name="MieleCantrell"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 4 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-1, 1], [-1, 1], [-1, 1], [-1, 1]])) + + def __call__(self, x): + return ((np.exp(-x[:, 0])-x[:, 1])**4+self.c1*(x[:, 1]-x[:, 2])**6+np.tan(x[:, 2]-x[:, 3])**4+x[:, 0]**8).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = - 4.0*(np.exp(-x[:, 0])-x[:, 1])**3 * np.exp(-x[:, 0]) + 8.0*x[:, 0]**7 + + grad[:, 0, 1] = - 4.0*(np.exp(-x[:, 0])-x[:, 1])**3 + self.c1*6*(x[:, 1]-x[:, 2])**5 + + grad[:, 0, 2] = -self.c1*6.0*(x[:, 1]-x[:, 2])**5 + 4.0 * np.sin(x[:, 2]-x[:, 3])**3/np.cos(x[:, 2]-x[:, 3])**5 + + grad[:, 0, 3] = - 4.0 * np.sin(x[:, 2]-x[:, 3])**3/np.cos(x[:, 2]-x[:, 3])**5 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Powell(Function): + r""" + Powell function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Powell] + + .. 
math:: + f(x)=(x_3+c_1x_1)^2+c_2(x_2-x_4)^2+(x_1-c_3x_2)^4+c_4(x_3-x_4)^4 + + + Default constant values are :math:`c = (10.0, 5.0, 2.0, 10.0)` + + """ + def __init__(self, c1=10.0, c2=5.0, c3=2.0, c4=10.0, name="Powell"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 4 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-4, 5], [-4, 5], [-4, 5], [-4, 5]])) + + def __call__(self, x): + return ((((x[:, 2]+(self.c1*x[:, 0]))**2+(self.c2*(x[:, 1]-x[:, 3])**2))+(x[:, 0]-(self.c3*x[:, 1]))**4)+(self.c4*(x[:, 2]-x[:, 3])**4)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((x[:, 2]+(self.c1*x[:, 0]))**2*(2*((1/(x[:, 2]+(self.c1*x[:, 0])))*self.c1)))+((x[:, 0]-(self.c3*x[:, 1]))**4*(4*(1/(x[:, 0]-(self.c3*x[:, 1]))))) + grad[:, 0, 1] = (self.c2*((x[:, 1]-x[:, 3])**2*(2*(1/(x[:, 1]-x[:, 3])))))+((x[:, 0]-(self.c3*x[:, 1]))**4*(4*((1/(x[:, 0]-(self.c3*x[:, 1])))*(-self.c3)))) + grad[:, 0, 2] = ((x[:, 2]+(self.c1*x[:, 0]))**2*(2*(1/(x[:, 2]+(self.c1*x[:, 0])))))+(self.c4*((x[:, 2]-x[:, 3])**4*(4*(1/(x[:, 2]-x[:, 3]))))) + grad[:, 0, 3] = (self.c2*((x[:, 1]-x[:, 3])**2*(2*(-(1/(x[:, 1]-x[:, 3]))))))+(self.c4*((x[:, 2]-x[:, 3])**4*(4*(-(1/(x[:, 2]-x[:, 3])))))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Dolan(Function): + r""" + Dolan function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_D.html#go_benchmark.Dolan] + + .. math:: + f(x)=|(x_1 + c_1x_2)\sin(x_1) - c_2x_3 - c_3x_4\cos(x_5 + x_5 - x_1) + c_4x_5^2 - x_2 - c_5| + + + Default constant values are :math:`c = (1.7, 1.5, 0.1, 0.2, 1)` + + """ + def __init__(self, c1=1.7, c2=1.5, c3=0.1, c4=0.2, c5=1, name="Dolan"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 5 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-100, 100], [-100, 100], [-100, 100], [-100, 100], [-100, 100]])) + + def __call__(self, x): + return (np.abs(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = np.sign(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)*((np.sin(x[:, 0])+((x[:, 0]+(self.c1*x[:, 1]))*np.cos(x[:, 0])))-((self.c3*x[:, 3])*(-(-np.sin((x[:, 4]+x[:, 4])-x[:, 0]))))) + grad[:, 0, 1] = np.sign(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)*((self.c1*np.sin(x[:, 0]))-1) + grad[:, 0, 2] = np.sign(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)*(-self.c2) + grad[:, 0, 3] = np.sign(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)*(-(self.c3*np.cos((x[:, 4]+x[:, 4])-x[:, 0]))) + grad[:, 0, 4] = np.sign(((((((x[:, 0]+(self.c1*x[:, 1]))*np.sin(x[:, 
0]))-(self.c2*x[:, 2]))-((self.c3*x[:, 3])*np.cos((x[:, 4]+x[:, 4])-x[:, 0])))+(self.c4*x[:, 4]**2))-x[:, 1])-self.c5)*((-((self.c3*x[:, 3])*((-np.sin((x[:, 4]+x[:, 4])-x[:, 0]))*(1+1))))+(self.c4*(x[:, 4]**2*(2*(1/x[:, 4]))))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Friedman(Function): + r""" + Friedman function + + Reference: [https://www.sfu.ca/~ssurjano/fried.html] + + A 5d trigonometric function, linear in :math:`x_4` and :math:`x_5` + + + Default constant values are :math:`c = (10., 20., 0.5, 2., 10., 5.)` + + .. math:: + f(x)=c_1\sin(\pi x_1x_2)+c_2(x_3-c_3)^{c_4}+c_5x_4+c_6x_5 + + """ + def __init__(self, c1=10., c2=20., c3=-0.5, c4=2., c5=10., c6=5., name="Friedman"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6 = c1, c2, c3, c4, c5, c6 + self.dim = 5 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + x1, x2, x3, x4, x5 = x[:, 0], x[:, 1], x[:, 2], x[:, 3], x[:, 4] + return (self.c1 * np.sin(np.pi * x1 * x2) + self.c2 * (x3 - self.c3) ** self.c4 + self.c5 * x4 + self.c6 * x5)[:, np.newaxis] + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + grad[:, 0, 0] = self.c1 * np.pi * x[:, 1] * np.cos(np.pi * x[:, 0] * x[:, 1]) + grad[:, 0, 1] = self.c1 * np.pi * x[:, 0] * np.cos(np.pi * x[:, 0] * x[:, 1]) + grad[:, 0, 2] = self.c2 * self.c4 * (x[:, 2] - self.c3) ** (self.c4 - 1) + grad[:, 0, 3] = self.c5 + grad[:, 0, 4] = self.c6 + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class GramacyLee(Function): + r""" + Gramacy and Lee function + + Reference: [https://www.sfu.ca/~ssurjano/grlee09.html] + + A 6d function, where :math:`x_5` and :math:`x_6` aren't active + + .. math:: + f(x)=e^{sin((c_1(x_1+c_2))^{c_3})}+x_2x_3+x_4 + + Default constant values are :math:`c = (0.9, 0.48, 10.)`. + + """ + def __init__(self, c1=0.9, c2=0.48, c3=10., name="GramacyLee"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 6 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3] + return (np.exp(np.sin((self.c1 * (x1 + self.c2)) ** self.c3)) + x2 * x3 + x4)[:, np.newaxis] + + def grad(self, x): + x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3] + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + grad[:, 0, 0] = (np.exp(np.sin((self.c1 * (x1 + self.c2)) ** self.c3))) \ + * np.cos((self.c1 * (x1 + self.c2)) ** self.c3) \ + * self.c3 * (self.c1 * (x1 + self.c2)) ** (self.c3 - 1) \ + * self.c1 + grad[:, 0, 1] = x3 + grad[:, 0, 2] = x2 + grad[:, 0, 3] = 1. + grad[:, 0, 4:] = 0. + + return grad diff --git a/src/pytuq/func/bench1d.py b/src/pytuq/func/bench1d.py new file mode 100755 index 0000000..2fb1867 --- /dev/null +++ b/src/pytuq/func/bench1d.py @@ -0,0 +1,531 @@ +#!/usr/bin/env python +""" +1d benchmark functions module. + +Most of the functions are taken from https://github.com/Vahanosi4ek/pytuq_funcs. 
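+
+A minimal usage sketch (illustrative only; all classes below share the same 1d interface)::
+
+    import numpy as np
+    from pytuq.func.bench1d import TFData
+
+    fcn = TFData()                     # 1d, domain [-20, 60]
+    y = fcn(np.array([[0.0]]))         # w0*0*(1+sin(0)) + b0 = 5.0 with default constants
+    dy = fcn.grad(np.array([[0.0]]))   # shape (1, 1, 1); value w0*(1 + sin(0) + 0) = 0.125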
+""" +import sys +import numpy as np + +from .func import Function + +################################################################################ +################################################################################ +################################################################################ + +class TFData(Function): + r"""TensorFlow function + + Data generating toy model inspired by https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb#scrollTo=5zCEYpzu7bDX . + + .. math:: + f(x)=w_0 x (1 + \sin(x)) + b_0 + + Default constant values are :math:`w_0 = 0.125`, :math:`b_0 = 5.0`, :math:`a = -20.0`, :math:`b = 60.0`. + """ + + def __init__(self, name='tfdata'): + super().__init__() + self.name = name + + self.dim = 1 + self.outdim = 1 + + self.w0 = 0.125 + self.b0 = 5. + self.a = -20. + self.b = 60. + + self.setDimDom(domain=np.array([[self.a, self.b]])) + + return + + def __call__(self, x): + + y = (self.w0 * x * (1. + np.sin(x)) + self.b0) + + return y + + def grad(self, x): + + dy = self.w0 * (1. + np.sin(x) + x * np.cos(x)) + + return dy.reshape(-1, self.outdim, self.dim) + +################################################################################ +################################################################################ +################################################################################ + +class SineSum(Function): + r"""Simple sum of sines + + Problem 02 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem02] + + .. math:: + f(x)=\sin(c_1x)+\sin(c_2x) + + + Default constant values are :math:`c = (1., 10./3.)`. + + """ + def __init__(self, c1=1., c2=10./3., name="SineSum"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([2.7, 7.5])) + + def __call__(self, x): + return np.sin(self.c1 * x) + np.sin(self.c2 * x) + + def grad(self, x): + return (self.c1 * np.cos(self.c1 * x) + self.c2 * np.cos(self.c2 * x))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class SineSum2(Function): + r"""A more complex sum of sines + + Problem 03 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem03] + + .. math:: + f(x)=-\sum_{k=1}^{c_1}k\sin((k+1)x+k) + + + Default constant value is :math:`c = 6`. 
+ + """ + def __init__(self, c1=6, name="SineSum2"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + summation = np.zeros((x.shape[0], self.c1)) + k = np.broadcast_to(np.arange(1, self.c1 + 1), (x.shape[0], self.c1)) + summation += k * np.sin((k + 1) * x + k) + + return -np.sum(summation, axis=1, keepdims=True) + + def grad(self, x): + _ = self.__call__(x) + summation = np.zeros((x.shape[0], self.c1)) + k = np.broadcast_to(np.arange(1, self.c1 + 1), (x.shape[0], self.c1)) + summation += k * (k + 1) * np.cos((k + 1) * x + k) + + return -np.sum(summation, axis=1, keepdims=True)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class QuadxExp(Function): + r"""Product of quadratic and exponent + + Problem 04 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem04] + + .. math:: + f(x)=-(c_1x^2+c_2x+c_3)e^{-x} + + + Default constant values are :math:`c = (16., -24., 5.)`. + + """ + def __init__(self, c1=16., c2=-24., c3=5., name="QuadxExp"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([1.9, 3.9])) + + def __call__(self, x): + self._quad = self.c1 * x ** 2 + self.c2 * x + self.c3 + self._exp = np.exp(-x) + return (-self._quad * self._exp) + + def grad(self, x): + _ = self.__call__(x) + return -(self._quad * -self._exp + self._exp * (2 * self.c1 * x + self.c2))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class LinxSin(Function): + r"""Product of linear and sine functions + + Problem 05 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem05] + + .. math:: + f(x)=-(c_1-c_2x)sin(c_3x) + + + Default constant values are :math:`c = (1.4, -3., 18.)`. + + """ + def __init__(self, c1=1.4, c2=-3., c3=18., name="LinxSin"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.2])) + + def __call__(self, x): + self._linear = (self.c1 + self.c2 * x) + self._sine = np.sin(self.c3 * x) + return -self._linear * self._sine + + def grad(self, x): + _ = self.__call__(x) + return -(self._linear * self.c3 * np.cos(self.c3 * x) + self._sine * self.c2)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class SinexExp(Function): + r"""Product of sine and exp functions + + Problem06 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem06] + + .. 
math:: + f(x)=-(x+\sin(x))e^{-x^2} + + """ + def __init__(self, name="SinexExp"): + super().__init__(name=name) + + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + self._sine = (x + np.sin(x)) + self._exp = np.exp(-x ** 2) + return -self._sine * self._exp + + def grad(self, x): + _ = self.__call__(x) + return -(self._sine * (-self._exp * 2 * x) + self._exp * (np.cos(x) + 1))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class SineLogSum(Function): + r"""Sum of sine and log functions + + Problem07 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem07] + + .. math:: + f(x)=\sin(c_1x) + \sin(c_2x) + \log_{c_3}(x) + c_4x + c_5 + + + Default constant values are :math:`c = (1., 10/3, e, -0.84, 3.)`. + + """ + def __init__(self, c1=1., c2=10/3, c3=np.exp(1), c4=-.84, c5=3., name="SineLogSum"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([2.7, 7.5])) + + def __call__(self, x): + return np.sin(self.c1 * x) + np.sin(self.c2 * x) + np.emath.logn(self.c3, x) + self.c4 * x + self.c5 + + def grad(self, x): + return (self.c1 * np.cos(self.c1 * x) + self.c2 * np.cos(self.c2 * x) + 1 / (x * np.log(self.c3)) + self.c4)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class CosineSum(Function): + r"""Simple sum of cosines + + Problem 08 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem08] + + .. math:: + f(x)=-\sum_{k=1}^{c_1}k\cos((k+1)x+k) + + + Default constant value is :math:`c = 6`. + + """ + def __init__(self, c1=6, name="CosineSum"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + summation = np.zeros((x.shape[0], self.c1)) + k = np.broadcast_to(np.arange(1, self.c1 + 1), (x.shape[0], self.c1)) + summation += k * np.cos((k + 1) * x + k) + + return -np.sum(summation, axis=1, keepdims=True) + + def grad(self, x): + summation = np.zeros((x.shape[0], self.c1)) + k = np.broadcast_to(np.arange(1, self.c1 + 1), (x.shape[0], self.c1)) + summation -= k * (k + 1) * np.sin((k + 1) * x + k) + + return -np.sum(summation, axis=1, keepdims=True)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Sinex(Function): + r"""Product of x and sine function + + Problem10 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem10] + + .. 
math:: + f(x)=-x\sin(x) + + """ + def __init__(self, name="Sinex"): + super().__init__(name=name) + + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 10.])) + + def __call__(self, x): + return -x * np.sin(x) + + def grad(self, x): + return (-x * np.cos(x) - np.sin(x))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class CosineSum2(Function): + r"""Simple sum of cosines + + Problem11 [https://infinity77.net/global_optimization/test_functions_1d.html#go_benchmark.Problem11] + + .. math:: + f(x)=c_1\cos(x) + \cos(c_2x) + + + Default constant values are :math:`c = (2., 2.)`. + + """ + def __init__(self, c1=2., c2=2., name="CosineSum2"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-np.pi / 2, np.pi * 2])) + + def __call__(self, x): + return self.c1 * np.cos(x) + np.cos(self.c2 * x) + + def grad(self, x): + return (-self.c1 * np.sin(x) - self.c2 * np.sin(self.c2 * x))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Sinusoidal(Function): + r"""Simple 1d sine function + + Sinusoidal [https://www.sfu.ca/~ssurjano/curretal88sin.html] + + .. math:: + f(x)=\sin(c_1\pi(x-c_2)) + + Default constant values are :math:`c = (2., 0.1)`. + + """ + def __init__(self, c1=2., c2=0.1, name="Sinusoidal"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + return np.sin(self.c1 * np.pi * (x - self.c2)) + + def grad(self, x): + return (np.cos(self.c1 * np.pi * (x - self.c2)) * self.c1 * np.pi)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Forrester(Function): + r"""Forrester function + + Forrester [https://www.sfu.ca/~ssurjano/forretal08.html] + + .. math:: + f(x)=(c_1x-c_2)^2\sin(c_3x-c_4) + + Default constant values are :math:`c = (6., 2., 12., 4)`. 
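+
+    For reference, differentiating the expression above gives the gradient
+    implemented in ``grad``:
+
+    .. math::
+        f'(x)=2c_1(c_1x-c_2)\sin(c_3x-c_4)+c_3(c_1x-c_2)^2\cos(c_3x-c_4)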
+ + """ + def __init__(self, c1=6., c2=2., c3=12., c4=4., name="Forrester"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + return (self.c1 * x - self.c2) ** 2 * np.sin(self.c3 * x - self.c4) + + def grad(self, x): + return (2 * self.c1 * (self.c1 * x - self.c2) * np.sin(self.c3 * x - self.c4) + (self.c1 * x - self.c2) ** 2 * self.c3 * np.cos(self.c3 * x - self.c4))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class GramacyLee2(Function): + r"""Complicated oscillatory 1d function + + Gramacy and Lee (2012) [https://www.sfu.ca/~ssurjano/grlee12.html] + + .. math:: + f(x)=\frac{\sin(c_1\pi x)}{c_2x}+(x-c_3)^{c_4} + + Default constant values are :math:`c = (10., 2., 1., 4.)`. + + """ + def __init__(self, c1=10., c2=2., c3=1., c4=4., name="GramacyLee2"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0.5, 2.5])) + + def __call__(self, x): + return np.sin(self.c1 * np.pi * x) / (self.c2 * x) + (x - self.c3) ** self.c4 + + def grad(self, x): + return (((self.c1 * np.pi * np.cos(self.c1 * np.pi * x) * self.c2 * x) - self.c2 * np.sin(self.c1 * np.pi * x)) / (self.c2 * x) ** 2 + self.c4 * (x - self.c3) ** (self.c4 - 1))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Higdon(Function): + r"""Higdon function + + Higdon (2002) [https://www.sfu.ca/~ssurjano/hig02.html] + + .. math:: + f(x)=\sin(2\pi x/c_1) + c_2\sin(2\pi x/c_3) + + Default constant values are :math:`c = (10., 0.2, 2.5)`. + + """ + def __init__(self, c1=10., c2=0.2, c3=2.5, name="Higdon"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 10.])) + + def __call__(self, x): + return np.sin(2 * np.pi * x / self.c1) + self.c2 * np.sin(2 * np.pi * x / self.c3) + + def grad(self, x): + return (2 * np.pi / self.c1 * np.cos(2 * np.pi * x / self.c1) + 2 * np.pi * self.c2 / self.c3 * np.cos(2 * np.pi * x / self.c3))[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Holsclaw(Function): + r"""Holsclaw function + + Holsclaw et al. [https://www.sfu.ca/~ssurjano/holsetal13sin.html] + + .. math:: + f(x)=\frac{x\sin(x)}{c_1} + + Default constant value is :math:`c = 10.0`. 
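+
+    For reference, the gradient implemented in ``grad`` is
+
+    .. math::
+        f'(x)=\frac{\sin(x)+x\cos(x)}{c_1}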
+ + """ + def __init__(self, c1=10., name="Holsclaw"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 1 + self.outdim = 1 + + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 10.])) + + def __call__(self, x): + return x * np.sin(x) / self.c1 + + def grad(self, x): + return ((np.sin(x) + x * np.cos(x)) / self.c1)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class DampedCosine(Function): + r"""A simple 1d cosine function + + Damped Cosine [https://www.sfu.ca/~ssurjano/santetal03dc.html] + + .. math:: + f(x)=e^{c_1x}\cos(c_2\pi x) + + Default constant values are :math:`c = (-1.4, 3.5)`. + + """ + def __init__(self, c1=-1.4, c2=3.5, name="DampedCosine"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 1 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + return np.exp(self.c1 * x) * np.cos(self.c2 * np.pi * x) + + def grad(self, x): + return (self.c1 * self.__call__(x) - self.c2 * np.pi * np.exp(self.c1 * x) * np.sin(self.c2 * np.pi * x))[:, np.newaxis, :] diff --git a/src/pytuq/func/bench2d.py b/src/pytuq/func/bench2d.py new file mode 100755 index 0000000..85c4cfa --- /dev/null +++ b/src/pytuq/func/bench2d.py @@ -0,0 +1,2342 @@ +#!/usr/bin/env python +""" +2d benchmark functions module. + +Most of the functions are taken from https://github.com/Vahanosi4ek/pytuq_funcs that autogenerates the codes given function's latex strings. +""" +import sys +import numpy as np +from scipy.stats import multivariate_normal + +from .func import Function + + +################################################################################ +################################################################################ +################################################################################ + +class Adjiman(Function): + r"""Adjiman function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Adjiman] + + .. math:: + f(x)=\cos(x_1)\sin(x_2)-\frac{x_1}{x_2^2+c_1} + + Default constant value is :math:`c_1 = (1.0)`. + + """ + def __init__(self, c1=1., name="Adjiman"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-1., 2.,], [-1., 1.]])) + + def __call__(self, x): + return (np.cos(x[:, 0]) * np.sin(x[:, 1]) - x[:, 0] / (x[:, 1] ** 2 + self.c1))[:, np.newaxis] + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + x1, x2 = x[:, 0], x[:, 1] + + grad[:, 0, 0] = np.sin(x2) * -np.sin(x1) - 1 / (x2 ** 2 + self.c1) + grad[:, 0, 1] = np.cos(x1) * np.cos(x2) - x1 * -1 / (x2 ** 2 + self.c1) ** 2 * 2 * x2 + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class BartelsConn(Function): + r"""BartelsConn function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.BartelsConn] + + .. 
math:: + f(x)=|x_1^2+x_2^2+x_1x_2|+|\sin(x_1)|+|\cos(x_2)| + + """ + def __init__(self, name="BartelsConn"): + super().__init__(name=name) + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-50., 50.])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + self._t1 = x1 ** 2 + x2 ** 2 + x1 * x2 + self._t2 = np.sin(x1) + self._t3 = np.cos(x2) + return (np.abs(self._t1) + np.abs(self._t2) + np.abs(self._t3))[:, np.newaxis] + + def grad(self, x): + _ = self.__call__(x) + x1, x2 = x[:, 0], x[:, 1] + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = np.sign(self._t1) * (2 * x1 + x2) + np.sign(self._t2) * np.cos(x1) + grad[:, 0, 1] = np.sign(self._t1) * (2 * x2 + x1) + np.sign(self._t3) * -np.sin(x2) + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Bird(Function): + r"""Bird function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Bird] + + .. math:: + f(x)=(x_1-x_2)^2+e^{(c_1-\sin(x_1))^2}\cos(x_2)+e^{(c_2-\cos(x_2))^2}\sin(x_1) + + Default constant values are :math:`c = (1., 1.)`. + + """ + def __init__(self, c1=1., c2=1., name="Bird"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-2 * np.pi, 2 * np.pi])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + self._t1 = x1 - x2 + self._t2_1 = np.exp((self.c1 - np.sin(x1)) ** 2) + self._t2_2 = np.cos(x2) + self._t3_1 = np.exp((self.c2 - np.cos(x2)) ** 2) + self._t3_2 = np.sin(x1) + return (self._t1 ** 2 + self._t2_1 * self._t2_2 + self._t3_1 * self._t3_2)[:, np.newaxis] + + def grad(self, x): + _ = self.__call__(x) + x1, x2 = x[:, 0], x[:, 1] + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + _t2_1_grad = self._t2_1 * 2 * (self.c1 - np.sin(x1)) * -np.cos(x1) + _t2_2_grad = -np.sin(x2) + _t3_1_grad = self._t3_1 * 2 * (self.c2 - np.cos(x2)) * np.sin(x2) + _t3_2_grad = np.cos(x1) + grad[:, 0, 0] = 2 * self._t1 + self._t2_2 * _t2_1_grad + self._t3_1 * _t3_2_grad + grad[:, 0, 1] = -2 * self._t1 + self._t2_1 * _t2_2_grad + self._t3_2 * _t3_1_grad + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Branin(Function): + """Branin function + + Reference: [https://www.sfu.ca/~ssurjano/branin.html] + + """ + def __init__(self, name='Branin'): + super().__init__() + self.setDimDom(domain=np.array([[-5., 10.], [0., 15.]])) + self.name = name + self.outdim = 1 + + self.a_ = 1. + self.b_ = 5.1/(4*np.pi**2) + self.c_ = 5./np.pi + self.r_ = 6. + self.s_ = 10. + self.t_ = 1./(8.*np.pi) + + return + + def __call__(self, x): + self.checkDim(x) + + yy = self.a_ * (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_)**2 + yy += self.s_*(1.-self.t_)*np.cos(x[:, 0]) + yy += self.s_ + + return yy.reshape(-1,1) + + + def grad(self, x): + + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = 2.0 * self.a_ * \ + (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_) * \ + (-2.0 * self.b_ * x[:, 0] + self.c_) - \ + self.s_ * (1. 
- self.t_) * np.sin(x[:, 0]) + grad[:, 0, 1] = 2.0 * self.a_ * \ + (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_) + + + return grad + +################################################################################ +################################################################################ +################################################################################ + + +class Branin01(Function): + r"""Branin01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Branin01] + + .. math:: + f(x)=\left(-c_1 \frac{x_1^{2}}{\pi^{2}} + c_2 \frac{x_1}{\pi} + x_2 - c_3\right)^{2} + \left(c_4 - \frac{c_5}{c_6 \pi} \right) \cos\left(x_1\right) + c_7 + + Default constant values are :math:`c = (1.275, 5., 6., 10., 5., 4., 10.)`. + + """ + def __init__(self, c1=1.275, c2=5., c3=6., c4=10., c5=5., c6=4., c7=10., name="Branin01"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7 = c1, c2, c3, c4, c5, c6, c7 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5., 10.], [0., 15.]])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + self._t1 = -self.c1 * x1 ** 2 / np.pi ** 2 + self.c2 * x1 / np.pi + x2 - self.c3 + self._t2 = self.c4 - self.c5 / (self.c6 * np.pi) + return (self._t1 ** 2 + self._t2 * np.cos(x1) + self.c7)[:, np.newaxis] + + def grad(self, x): + _ = self.__call__(x) + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + x1, x2 = x[:, 0], x[:, 1] + grad[:, 0, 0] = 2 * self._t1 * (-self.c1 * 2 / np.pi ** 2 * x1 + self.c2 / np.pi) - self._t2 * np.sin(x1) + grad[:, 0, 1] = 2 * self._t1 + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Branin02(Function): + r"""Branin02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Branin02] + + .. math:: + f(x)=\left(- c_1 \frac{x_1^{2}}{\pi^{2}} + c_2 \frac{x_1}{\pi} + x_2 -c_3\right)^{2} + \left(c_4 - \frac{c_5}{c_6 \pi} \right) \cos\left(x_1\right) \cos\left(x_2\right) + \log(x_1^2+x_2^2 +c_7) + c_8 + + + Default constant values are :math:`c = (1.275, 5., 6., 10., 5., 4., 1., 10.)`. 
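+
+    For reference, writing :math:`u=-c_1x_1^2/\pi^2+c_2x_1/\pi+x_2-c_3` and
+    :math:`v=c_4-c_5/(c_6\pi)`, the gradient implemented in ``grad`` is
+
+    .. math::
+        \frac{\partial f}{\partial x_1}=2u\left(-\frac{2c_1x_1}{\pi^2}+\frac{c_2}{\pi}\right)-v\sin(x_1)\cos(x_2)+\frac{2x_1}{x_1^2+x_2^2+c_7}
+
+        \frac{\partial f}{\partial x_2}=2u-v\cos(x_1)\sin(x_2)+\frac{2x_2}{x_1^2+x_2^2+c_7}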
+ + """ + def __init__(self, c1=1.275, c2=5., c3=6., c4=10., c5=5., c6=4., c7=1., c8=10., name="Branin02"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8 = c1, c2, c3, c4, c5, c6, c7, c8 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-5., 15.])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + self._t1 = -self.c1 * x1 ** 2 / np.pi ** 2 + self.c2 * x1 / np.pi + x2 - self.c3 + self._t2 = self.c4 - self.c5 / (self.c6 * np.pi) + self._t3 = np.log(x1 ** 2 + x2 ** 2 + self.c7) + return (self._t1 ** 2 + self._t2 * np.cos(x1) * np.cos(x2) + self._t3 + self.c8)[:, np.newaxis] + + def grad(self, x): + _ = self.__call__(x) + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + x1, x2 = x[:, 0], x[:, 1] + grad[:, 0, 0] = 2 * self._t1 * (-self.c1 * 2 / np.pi ** 2 * x1 + self.c2 / np.pi) - self._t2 * np.cos(x2) * np.sin(x1) + 1 / (x1 ** 2 + x2 ** 2 + self.c7) * 2 * x1 + grad[:, 0, 1] = 2 * self._t1 - self._t2 * np.cos(x1) * np.sin(x2) + 1 / (x1 ** 2 + x2 ** 2 + self.c7) * 2 * x2 + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Brent(Function): + r"""Brent function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Brent] + + .. math:: + f(x)=(x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2-x_2^2)} + + + Default constant values are :math:`c = (10., 10.)`. + + """ + def __init__(self, c1=10., c2=10., name="Brent"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + return ((x1 + self.c1) ** 2 + (x2 + self.c2) ** 2 + np.exp(-x1 ** 2 - x2 ** 2))[:, np.newaxis] + + def grad(self, x): + x1, x2 = x[:, 0], x[:, 1] + return (2 * (x + self.c1) + np.exp(-x1 ** 2 - x2 ** 2)[:, np.newaxis] * -2 * x)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Bukin02(Function): + r"""Bukin02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Bukin02] + + .. 
math:: + f(x)=c_1 (x_2 - c_2 x_1^2 + c_3) + c_4 (x_1 + c_5)^2 + + + Default constant values are :math:`c = (100.0, 0.01, 1.0, 0.01, 10.0)` + + """ + def __init__(self, c1=100.0, c2=0.01, c3=1.0, c4=0.01, c5=10.0, name="Bukin02"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-15., -5.], [-3., 3.]])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + return (self.c1*(x[:, 1]-self.c2*x[:, 0]**2.0+self.c3)+self.c4*(x[:, 0]+self.c5)**2.0).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = -2.0*self.c2*self.c1*x[:, 0]+2.0*self.c4*(x[:, 0]+self.c5) + grad[:, 0, 1] = self.c1 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Bukin04(Function): + r"""Bukin04 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Bukin04] + + .. math:: + f(x)=c_1 x_2^{2} + c_2 |x_1 + c_3| + + + Default constant values are :math:`c = (100.0, 0.01, 10.0)` + + """ + def __init__(self, c1=100.0, c2=0.01, c3=10.0, name="Bukin04"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-15.0, -5.0], [-3.0, 3.0]])) + + def __call__(self, x): + return (self.c1*x[:, 1]**(2.0)+self.c2*np.abs(x[:, 0]+self.c3)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = self.c2*np.sign(x[:, 0]+self.c3) + grad[:, 0, 1] = 2.0*self.c1*x[:, 1] + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Bukin6(Function): + r"""Bukin6 function + + .. math:: + f(x)=c_1\sqrt{|x_2-c_2 x_1^2|}+c_3|x_1+c_4| + + + Default constant values are :math:`c = (100.0, 0.01, 0.01, 10.0)` + + """ + def __init__(self, c1=100.0, c2=0.01, c3=0.01, c4=10.0, name="Bukin6"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-15., -5.], [-3., 3.]])) + + def __call__(self, x): + return (self.c1*np.sqrt(np.abs(x[:, 1]-self.c2*x[:, 0]**2.0))+self.c3*np.abs(x[:, 0]+self.c4)).reshape(-1, 1) + + def grad(self, x): + x1 = x[:, 0] + x2 = x[:, 1] + + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + x1t1 = self.c1 * -np.sign(x2 - self.c2 * x1 ** 2) / np.sqrt(np.abs(x2 - self.c2 * x1 ** 2)) * self.c2 * x1 + x1t2 = self.c3 * np.sign(x1 + self.c4) + + x2t1 = self.c1 * np.sign(x2 - self.c2 * x1 ** 2) / (2 * np.sqrt(np.abs(x2 - self.c2 * x1 ** 2))) + + grad[:, 0, 0] = x1t1 + x1t2 + grad[:, 0, 1] = x2t1 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class CarromTable(Function): + r"""CarromTable function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CarromTable] + + .. 
math:: + f(x)=- c_1 \exp(c_2 |c_3 - \frac{\sqrt{x_1^{2} + x_2^{2}}}{\pi}|) \cos(x_1)^2 \cos(x_2)^2 + + + Default constant values are :math:`c = (1/30, 2.0, 1.0)` + + """ + def __init__(self, c1=1/30, c2=2.0, c3=1.0, name="CarromTable"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10.0, 10.0], [-10.0, 10.0]])) + + def __call__(self, x): + return (-self.c1*np.exp(self.c2*np.abs(self.c3-(np.sqrt(x[:, 0]**(2.0)+x[:, 1]**(2.0)))/(np.pi)))*np.cos(x[:, 0])**2.0*np.cos(x[:, 1])**2.0).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = self.c1*np.exp(self.c2*np.abs(self.c3-(np.sqrt(x[:, 0]**(2.0)+x[:, 1]**(2.0)))/(np.pi)))*self.c2*np.sign(self.c3-(np.sqrt(x[:, 0]**(2.0)+x[:, 1]**(2.0)))/(np.pi))*(1.0)/(2.0*np.pi*np.sqrt(x[:, 0]**2.0+x[:, 1]**2.0))*2.0*x[:, 0]*np.cos(x[:, 0])**2.0*np.cos(x[:, 1])**2.0+self.c1*np.exp(self.c2*np.abs(self.c3-(np.sqrt(x[:, 0]**(2.0)+x[:, 1]**(2.0)))/(np.pi)))*2.0*np.cos(x[:, 0])*np.sin(x[:, 0])*np.cos(x[:, 1])**2.0 + grad[:, 0, 1] = self.c1*np.exp(self.c2*np.abs(self.c3-(np.sqrt(x[:, 1]**(2.0)+x[:, 0]**(2.0)))/(np.pi)))*self.c2*np.sign(self.c3-(np.sqrt(x[:, 1]**(2.0)+x[:, 0]**(2.0)))/(np.pi))*(1.0)/(2.0*np.pi*np.sqrt(x[:, 1]**2.0+x[:, 0]**2.0))*2.0*x[:, 1]*np.cos(x[:, 1])**2.0*np.cos(x[:, 0])**2.0+self.c1*np.exp(self.c2*np.abs(self.c3-(np.sqrt(x[:, 1]**(2.0)+x[:, 0]**(2.0)))/(np.pi)))*2.0*np.cos(x[:, 1])*np.sin(x[:, 1])*np.cos(x[:, 0])**2.0 + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class ChengSandu(Function): + r"""Cheng and Sandu 2d function + + .. math:: + f(x)=\cos(x_1+x_2)e^{x_1x_2} + + """ + def __init__(self, name="ChengSandu"): + super().__init__(name=name) + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + return (np.cos(x[:, 0] + x[:, 1]) * np.exp(x[:, 0] * x[:, 1]))[:, np.newaxis] + + def grad(self, x): + exp = np.exp(x[:, 0] * x[:, 1])[:, np.newaxis] + sin = np.sin(x[:, 0] + x[:, 1])[:, np.newaxis] + cos = np.cos(x[:, 0] + x[:, 1])[:, np.newaxis] + + return ((x[:, ::-1] * cos - sin) * exp)[:, np.newaxis, :] + +################################################################################ +################################################################################ +################################################################################ + +class Chichinadze(Function): + r"""Chichinadze function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.Chichinadze] + + .. 
math:: + f(x)= x_1^{2} - c_1 x_1 + c_2 \sin(c_3 \pi x_1) + c_4 \cos(c_5 \pi x_1) + c_6 - \frac{c_7}{\exp(c_8 (x_2 -c_9)^{2})} + + + Default constant values are :math:`c = (12.0, 8.0, 2.5, 10.0, 0.5, 11.0, 0.2 * \sqrt{5}, 0.5, 0.5)` + + """ + def __init__(self, c1=12.0, c2=8.0, c3=2.5, c4=10.0, c5=0.5, c6=11.0, c7=0.2 * np.sqrt(5), c8=0.5, c9=0.5, name="Chichinadze"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8, self.c9 = c1, c2, c3, c4, c5, c6, c7, c8, c9 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-30.0, 30.0], [-30.0, 30.0]])) + + def __call__(self, x): + return (x[:, 0]**(2.0)-self.c1*x[:, 0]+self.c2*np.sin(self.c3*np.pi*x[:, 0])+self.c4*np.cos(self.c5*np.pi*x[:, 0])+self.c6-(self.c7)/(np.exp(self.c8*(x[:, 1]-self.c9)**(2.0)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = 2.0*x[:, 0]-self.c1+self.c2*self.c3*np.pi*np.cos(self.c3*np.pi*x[:, 0])-self.c4*self.c5*np.pi*np.sin(self.c5*np.pi*x[:, 0]) + grad[:, 0, 1] = (self.c7)/(np.exp(self.c8*(x[:, 1]-self.c9)**(2.0))**2.0)*np.exp(self.c8*(x[:, 1]-self.c9)**(2.0))*self.c8*2.0*(x[:, 1]-self.c9) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class CrossInTray(Function): + r"""Cross-In-Tray function + + .. math:: + f(x)=-c_1(|\sin(x_1)\sin(x_2)|e^{|c_2-\frac{\sqrt{x_1^2+x_2^2}}{\pi}|}+c_3)^{c_4} + + + Default constant values are :math:`c = (0.0001, 100., 1., 0.1)`. + + """ + def __init__(self, c1=0.0001, c2=100., c3=1., c4=0.1, name="CrossInTray"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + inner1 = np.sin(x1) * np.sin(x2) + inner2 = np.exp(np.abs(self.c2 - np.sqrt(x1 ** 2 + x2 ** 2) / np.pi)) + + return (-self.c1 * (np.abs(inner1 * inner2) + self.c3) ** self.c4)[:, np.newaxis] + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + x1, x2 = x[:, 0], x[:, 1] + + dist = np.sqrt(x1 ** 2 + x2 ** 2) + inner1x1 = np.cos(x1) * np.sin(x2) + inner1x2 = np.cos(x2) * np.sin(x1) + inner2x1 = -np.exp(np.abs(self.c2 - dist / np.pi)) * np.sign(self.c2 - dist / np.pi) * x1 / (np.pi * dist) + inner2x2 = -np.exp(np.abs(self.c2 - dist / np.pi)) * np.sign(self.c2 - dist / np.pi) * x2 / (np.pi * dist) + innerx1 = inner1x1 * np.exp(np.abs(self.c2 - dist / np.pi)) + np.sin(x1) * np.sin(x2) * inner2x1 + innerx2 = inner1x2 * np.exp(np.abs(self.c2 - dist / np.pi)) + np.sin(x1) * np.sin(x2) * inner2x2 + inner_absx1 = np.sign(np.abs(np.sin(x1) * np.sin(x2) * np.exp(np.abs(self.c2 - dist / np.pi)))) * innerx1 + inner_absx2 = np.sign(np.abs(np.sin(x1) * np.sin(x2) * np.exp(np.abs(self.c2 - dist / np.pi)))) * innerx2 + + grad[:, 0, 0] = -self.c1 * self.c4 * (np.abs(np.sin(x1) * np.sin(x2) * np.exp(np.abs(self.c2 - dist / np.pi))) + self.c3) ** (self.c4 - 1) * inner_absx1 + grad[:, 0, 1] = -self.c1 * self.c4 * (np.abs(np.sin(x1) * np.sin(x2) * np.exp(np.abs(self.c2 - dist / np.pi))) + self.c3) ** (self.c4 - 1) * inner_absx2 + + return grad + +################################################################################ 
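+
+# Illustrative usage sketch (documentation only; the commented snippet below is
+# not part of the module). Every class in this file follows the same interface:
+# __call__ maps an (N, 2) array of input points to an (N, 1) array of values,
+# and grad returns the analytic derivatives as an (N, 1, 2) array, so any of
+# the implementations can be spot-checked against central finite differences:
+#
+#     fcn = ChengSandu()
+#     x = np.random.uniform(0., 1., size=(5, 2))           # points in the default domain
+#     g = fcn.grad(x)                                       # analytic gradient, shape (5, 1, 2)
+#     eps = 1.e-6
+#     for i in range(2):
+#         dx = np.zeros((1, 2))
+#         dx[0, i] = eps
+#         g_fd = (fcn(x + dx) - fcn(x - dx)) / (2. * eps)   # central difference, shape (5, 1)
+#         assert np.allclose(g[:, :, i], g_fd, atol=1.e-5)
+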
+################################################################################ +################################################################################ + +class Damavandi(Function): + r"""Damavandi function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_D.html#go_benchmark.Damavandi] + + .. math:: + f(x)=\left[ c_1 - |\frac{\sin[\pi(x_1-c_2)]\sin[\pi(x_2-c_3)]}{\pi^2(x_1-c_4)(x_2-c_5)}|^{c_6} \right] \left[c_7 + (x_1-c_8)^2 + c_9(x_2-c_{10})^2 \right] + + + Default constant values are :math:`c = (1.0, 2.0, 2.0, 2.0, 2.0, 5.0, 2.0, 7.0, 2.0, 7.0)`. + + """ + def __init__(self, c1=1.0, c2=2.0, c3=2.0, c4=2.0, c5=2.0, c6=5.0, c7=2.0, c8=7.0, c9=2.0, c10=7.0, name="Damavandi"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8, self.c9, self.c10 = c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[0.0, 14.0], [0.0, 14.0]])) + + def __call__(self, x): + return (((self.c1)-((np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))**(self.c6)))*(((self.c7)+(((x[:, 0])-(self.c8))**2))+((self.c9)*(((x[:, 1])-(self.c10))**2)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((-(((np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))**(self.c6))*((self.c6)*((1/(np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5))))))*((np.sign(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))*((((((np.cos(np.pi*((x[:, 0])-(self.c2))))*(np.pi*1))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))*(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5))))-(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))*(((np.pi**2)*1)*((x[:, 1])-(self.c5)))))/((((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))**2)))))))*(((self.c7)+(((x[:, 0])-(self.c8))**2))+((self.c9)*(((x[:, 1])-(self.c10))**2))))+(((self.c1)-((np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))**(self.c6)))*((((x[:, 0])-(self.c8))**2)*(2*((1/((x[:, 0])-(self.c8)))*1)))) + grad[:, 0, 1] = ((-(((np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))**(self.c6))*((self.c6)*((1/(np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5))))))*((np.sign(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))*(((((np.sin(np.pi*((x[:, 0])-(self.c2))))*((np.cos(np.pi*((x[:, 1])-(self.c3))))*(np.pi*1)))*(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5))))-(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 1])-(self.c3)))))*(((np.pi**2)*((x[:, 0])-(self.c4)))*1)))/((((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))**2)))))))*(((self.c7)+(((x[:, 0])-(self.c8))**2))+((self.c9)*(((x[:, 1])-(self.c10))**2))))+(((self.c1)-((np.abs(((np.sin(np.pi*((x[:, 0])-(self.c2))))*(np.sin(np.pi*((x[:, 
1])-(self.c3)))))/(((np.pi**2)*((x[:, 0])-(self.c4)))*((x[:, 1])-(self.c5)))))**(self.c6)))*((self.c9)*((((x[:, 1])-(self.c10))**2)*(2*((1/((x[:, 1])-(self.c10)))*1))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class DeckkersAarts(Function): + r"""DeckkersAarts function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_D.html#go_benchmark.DeckkersAarts] + + .. math:: + f(x)=c_1x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + c_2(x_1^2 + x_2^2)^4 + + + Default constant values are :math:`c = (1000, 0.001)`. + + """ + def __init__(self, c1=1000, c2=0.001, name="DeckkersAarts"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-20.0, 20.0], [-20.0, 20.0]])) + + def __call__(self, x): + return (((((self.c1)*(x[:, 0]**2))+(x[:, 1]**2))-(((x[:, 0]**2)+(x[:, 1]**2))**2))+((self.c2)*(((x[:, 0]**2)+(x[:, 1]**2))**4))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((self.c1)*(((x[:, 0])**2)*(2*((1/(x[:, 0]))*1))))-(((((x[:, 0])**2)+((x[:, 1])**2))**2)*(2*((1/(((x[:, 0])**2)+((x[:, 1])**2)))*(((x[:, 0])**2)*(2*((1/(x[:, 0]))*1)))))))+((self.c2)*(((((x[:, 0])**2)+((x[:, 1])**2))**4)*(4*((1/(((x[:, 0])**2)+((x[:, 1])**2)))*(((x[:, 0])**2)*(2*((1/(x[:, 0]))*1))))))) + grad[:, 0, 1] = ((((x[:, 1])**2)*(2*((1/(x[:, 1]))*1)))-(((((x[:, 0])**2)+((x[:, 1])**2))**2)*(2*((1/(((x[:, 0])**2)+((x[:, 1])**2)))*(((x[:, 1])**2)*(2*((1/(x[:, 1]))*1)))))))+((self.c2)*(((((x[:, 0])**2)+((x[:, 1])**2))**4)*(4*((1/(((x[:, 0])**2)+((x[:, 1])**2)))*(((x[:, 1])**2)*(2*((1/(x[:, 1]))*1))))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class EggCrate(Function): + r"""EggCrate function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_E.html#go_benchmark.EggCrate] + + .. math:: + f(x)=x_1^2 + x_2^2 + c_1 \left[ \sin^2(x_1) + \sin^2(x_2) \right] + + + Default constant values are :math:`c = 25.0`. + + """ + def __init__(self, c1=25.0, name="EggCrate"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5, 5], [-5, 5]])) + + def __call__(self, x): + return ((x[:, 0]**2+x[:, 1]**2)+(self.c1*(np.sin(x[:, 0])**2+np.sin(x[:, 1])**2))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (x[:, 0]**2*(2*(1/x[:, 0])))+(self.c1*(np.sin(x[:, 0])**2*(2*((1/np.sin(x[:, 0]))*np.cos(x[:, 0]))))) + grad[:, 0, 1] = (x[:, 1]**2*(2*(1/x[:, 1])))+(self.c1*(np.sin(x[:, 1])**2*(2*((1/np.sin(x[:, 1]))*np.cos(x[:, 1]))))) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class EggHolder(Function): + r"""Egg Holder function + + Reference: [https://www.sfu.ca/~ssurjano/egg.html] + + .. 
math:: + f(x)=-(x_2+c_1)\sin(\sqrt{|x_2+\frac{x_1}{2}+c_2|})-x_1\sin(x_1-(x_2+c_3)) + + + Default constant values are :math:`c = (47., 47., 47.)`. + + Note: this is a modified version: the original's last term has square-root and absolute value in sine argument. + """ + def __init__(self, c1=47., c2=47., c3=47., name='EggHolder'): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-512., 512.])) + + def __call__(self, x): + x1, x2 = x[:, 0], x[:, 1] + self._term1_1 = x2 + self.c1 + self._term1_2 = np.sin(np.sqrt(np.abs(x2 + x1 / 2 + self.c2))) + self._term2 = x1 * np.sin(x1 - (x2 + self.c3)) + return (-self._term1_1 * self._term1_2 - self._term2)[:, np.newaxis] + + def grad(self, x): + _ = self.__call__(x) + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + x1, x2 = x[:, 0], x[:, 1] + x1_grad1 = self._term1_1 * np.cos(np.sqrt(np.abs(x2 + x1 / 2 + self.c2))) * np.sign(x2 + x1 / 2 + self.c2) / (4 * np.sqrt(np.abs(x2 + x1 / 2 + self.c2))) + x1_grad2 = np.sin(x1 - (x2 + self.c3)) + x1 * np.cos(x1 - (x2 + self.c3)) + x2_grad1 = x1_grad1 * 2 + self._term1_2 + x2_grad2 = -x1 * np.cos(x1 - x2 - self.c3) + grad[:, 0, 0] = -x1_grad1 - x1_grad2 + grad[:, 0, 1] = -x2_grad1 - x2_grad2 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class ElAttarVidyasagarDutta(Function): + r"""ElAttarVidyasagarDutta function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_E.html#go_benchmark.ElAttarVidyasagarDutta] + + .. math:: + f(x)=(x_1^2 + x_2 - c_1)^2 + (x_1 + x_2^2 - c_2)^2 + (x_1^2 + x_2^3 - c_3)^2 + + + Default constant values are :math:`c = (10.0, 7.0, 1.0)`. + + """ + def __init__(self, c1=10.0, c2=7.0, c3=1.0, name="ElAttarVidyasagarDutta"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-100, 100], [-100, 100]])) + + def __call__(self, x): + return ((((x[:, 0]**2+x[:, 1])-self.c1)**2+((x[:, 0]+x[:, 1]**2)-self.c2)**2)+((x[:, 0]**2+x[:, 1]**3)-self.c3)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((((x[:, 0]**2+x[:, 1])-self.c1)**2*(2*((1/((x[:, 0]**2+x[:, 1])-self.c1))*(x[:, 0]**2*(2*(1/x[:, 0]))))))+(((x[:, 0]+x[:, 1]**2)-self.c2)**2*(2*(1/((x[:, 0]+x[:, 1]**2)-self.c2)))))+(((x[:, 0]**2+x[:, 1]**3)-self.c3)**2*(2*((1/((x[:, 0]**2+x[:, 1]**3)-self.c3))*(x[:, 0]**2*(2*(1/x[:, 0])))))) + grad[:, 0, 1] = ((((x[:, 0]**2+x[:, 1])-self.c1)**2*(2*(1/((x[:, 0]**2+x[:, 1])-self.c1))))+(((x[:, 0]+x[:, 1]**2)-self.c2)**2*(2*((1/((x[:, 0]+x[:, 1]**2)-self.c2))*(x[:, 1]**2*(2*(1/x[:, 1])))))))+(((x[:, 0]**2+x[:, 1]**3)-self.c3)**2*(2*((1/((x[:, 0]**2+x[:, 1]**3)-self.c3))*(x[:, 1]**3*(3*(1/x[:, 1])))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class FreudensteinRoth(Function): + r"""FreudensteinRoth function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_F.html#go_benchmark.FreudensteinRoth] + + .. 
math:: + f(x)=\left(x_1 - c_1 + \left[(c_2 - x_2)x_2 - c_3 \right] x_2 \right)^2 + \left (x_1 - c_4 + \left[(x_2 + c_5)x_2 - c_6 \right] x_2 \right)^2 + + + Default constant values are :math:`c = (13.0, 5.0, 2.0, 29.0, 1.0, 14.0)`. + + """ + def __init__(self, c1=13.0, c2=5.0, c3=2.0, c4=29.0, c5=1.0, c6=14.0, name="FreudensteinRoth"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6 = c1, c2, c3, c4, c5, c6 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (((x[:, 0]-self.c1)+((((self.c2-x[:, 1])*x[:, 1])-self.c3)*x[:, 1]))**2+((x[:, 0]-self.c4)+((((x[:, 1]+self.c5)*x[:, 1])-self.c6)*x[:, 1]))**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((x[:, 0]-self.c1)+((((self.c2-x[:, 1])*x[:, 1])-self.c3)*x[:, 1]))**2*(2*(1/((x[:, 0]-self.c1)+((((self.c2-x[:, 1])*x[:, 1])-self.c3)*x[:, 1])))))+(((x[:, 0]-self.c4)+((((x[:, 1]+self.c5)*x[:, 1])-self.c6)*x[:, 1]))**2*(2*(1/((x[:, 0]-self.c4)+((((x[:, 1]+self.c5)*x[:, 1])-self.c6)*x[:, 1]))))) + grad[:, 0, 1] = (((x[:, 0]-self.c1)+((((self.c2-x[:, 1])*x[:, 1])-self.c3)*x[:, 1]))**2*(2*((1/((x[:, 0]-self.c1)+((((self.c2-x[:, 1])*x[:, 1])-self.c3)*x[:, 1])))*((((-x[:, 1])+(self.c2-x[:, 1]))*x[:, 1])+(((self.c2-x[:, 1])*x[:, 1])-self.c3)))))+(((x[:, 0]-self.c4)+((((x[:, 1]+self.c5)*x[:, 1])-self.c6)*x[:, 1]))**2*(2*((1/((x[:, 0]-self.c4)+((((x[:, 1]+self.c5)*x[:, 1])-self.c6)*x[:, 1])))*(((x[:, 1]+(x[:, 1]+self.c5))*x[:, 1])+(((x[:, 1]+self.c5)*x[:, 1])-self.c6))))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Franke(Function): + r"""Franke function + + .. math:: + f(x) = 0.75 e^{-\frac{(9x_1-2)^2 + (9x_2-2)^2}{4}} + 0.75 e^{-\frac{(9x_1+1)^2}{49} - \frac{9x_2+1}{10}} + 0.5 e^{-\frac{(9x_1-7)^2 + (9x_2-3)^2}{4}} - 0.2 e^{-(9x_1-4)^2 - (9x_2-7)^2} + + """ + def __init__(self, name='Franke'): + super().__init__() + self.name = name + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain = np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + + + tt1 = 0.75*np.exp(-((9*x[:,0] - 2)**2 + (9*x[:,1] - 2)**2)/4.) + tt2 = 0.75*np.exp(-((9*x[:,0] + 1)**2)/49 - (9*x[:,1] + 1)/10.) + tt3 = 0.5*np.exp(-((9*x[:,0] - 7)**2 + (9*x[:,1] - 3)**2)/4.) + tt4 = -0.2*np.exp(-(9*x[:,0] - 4)**2 - (9*x[:,1] - 7)**2) + + + + return (tt1 + tt2 + tt3 + tt4).reshape(-1,self.outdim) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + tt1 = 0.75*np.exp(-((9*x[:,0] - 2)**2 + (9*x[:,1] - 2)**2)/4.) + tt2 = 0.75*np.exp(-((9*x[:,0] + 1)**2)/49 - (9*x[:,1] + 1)/10.) + tt3 = 0.5*np.exp(-((9*x[:,0] - 7)**2 + (9*x[:,1] - 3)**2)/4.) + tt4 = -0.2*np.exp(-(9*x[:,0] - 4)**2 - (9*x[:,1] - 7)**2) + + grad[:,0,0] = -2*(9*x[:,0] - 2)*9/4 * tt1 - 2*(9*x[:,0] + 1)*9/49 * tt2 + \ + -2*(9*x[:,0] - 7)*9/4 * tt3 - 2*(9*x[:,0] - 4)*9 * tt4 + grad[:,0,1] = -2*(9*x[:,1] - 2)*9/4 * tt1 - 9./10. 
* tt2 + \ + -2*(9*x[:,1] - 3)*9/4 * tt3 - 2*(9*x[:,1] - 7)*9 * tt4 + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class GoldsteinPrice(Function): + r"""GoldsteinPrice function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_G.html#go_benchmark.GoldsteinPrice] + + .. math:: + f(x)=\left[ 1+(x_1+x_2+1)^2(19-14x_1+3x_1^2-14x_2+6x_1x_2+3x_2^2) \right] \left[ 30+(2x_1-3x_2)^2(18-32x_1+12x_1^2+48x_2-36x_1x_2+27x_2^2) \right] + + """ + def __init__(self, name="GoldsteinPrice"): + super().__init__(name=name) + + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-2, 2], [-2, 2]])) + + def __call__(self, x): + return ((1+(((x[:, 0]+x[:, 1])+1)**2*(((((19-(14*x[:, 0]))+(3*x[:, 0]**2))-(14*x[:, 1]))+((6*x[:, 0])*x[:, 1]))+(3*x[:, 1]**2))))*(30+(((2*x[:, 0])-(3*x[:, 1]))**2*(((((18-(32*x[:, 0]))+(12*x[:, 0]**2))+(48*x[:, 1]))-((36*x[:, 0])*x[:, 1]))+(27*x[:, 1]**2))))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((((((x[:, 0]+x[:, 1])+1)**2*(2*(1/((x[:, 0]+x[:, 1])+1))))*(((((19-(14*x[:, 0]))+(3*x[:, 0]**2))-(14*x[:, 1]))+((6*x[:, 0])*x[:, 1]))+(3*x[:, 1]**2)))+(((x[:, 0]+x[:, 1])+1)**2*(((-14)+(3*(x[:, 0]**2*(2*(1/x[:, 0])))))+(6*x[:, 1]))))*(30+(((2*x[:, 0])-(3*x[:, 1]))**2*(((((18-(32*x[:, 0]))+(12*x[:, 0]**2))+(48*x[:, 1]))-((36*x[:, 0])*x[:, 1]))+(27*x[:, 1]**2)))))+((1+(((x[:, 0]+x[:, 1])+1)**2*(((((19-(14*x[:, 0]))+(3*x[:, 0]**2))-(14*x[:, 1]))+((6*x[:, 0])*x[:, 1]))+(3*x[:, 1]**2))))*(((((2*x[:, 0])-(3*x[:, 1]))**2*(2*((1/((2*x[:, 0])-(3*x[:, 1])))*2)))*(((((18-(32*x[:, 0]))+(12*x[:, 0]**2))+(48*x[:, 1]))-((36*x[:, 0])*x[:, 1]))+(27*x[:, 1]**2)))+(((2*x[:, 0])-(3*x[:, 1]))**2*(((-32)+(12*(x[:, 0]**2*(2*(1/x[:, 0])))))-(36*x[:, 1]))))) + grad[:, 0, 1] = ((((((x[:, 0]+x[:, 1])+1)**2*(2*(1/((x[:, 0]+x[:, 1])+1))))*(((((19-(14*x[:, 0]))+(3*x[:, 0]**2))-(14*x[:, 1]))+((6*x[:, 0])*x[:, 1]))+(3*x[:, 1]**2)))+(((x[:, 0]+x[:, 1])+1)**2*(((-14)+(6*x[:, 0]))+(3*(x[:, 1]**2*(2*(1/x[:, 1])))))))*(30+(((2*x[:, 0])-(3*x[:, 1]))**2*(((((18-(32*x[:, 0]))+(12*x[:, 0]**2))+(48*x[:, 1]))-((36*x[:, 0])*x[:, 1]))+(27*x[:, 1]**2)))))+((1+(((x[:, 0]+x[:, 1])+1)**2*(((((19-(14*x[:, 0]))+(3*x[:, 0]**2))-(14*x[:, 1]))+((6*x[:, 0])*x[:, 1]))+(3*x[:, 1]**2))))*(((((2*x[:, 0])-(3*x[:, 1]))**2*(2*((1/((2*x[:, 0])-(3*x[:, 1])))*(-3))))*(((((18-(32*x[:, 0]))+(12*x[:, 0]**2))+(48*x[:, 1]))-((36*x[:, 0])*x[:, 1]))+(27*x[:, 1]**2)))+(((2*x[:, 0])-(3*x[:, 1]))**2*((48-(36*x[:, 0]))+(27*(x[:, 1]**2*(2*(1/x[:, 1])))))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class HimmelBlau(Function): + r"""HimmelBlau function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_H.html#go_benchmark.HimmelBlau] + + .. math:: + f(x)=(x_1^2 + x_2 - c_1)^2 + (x_1 + x_2^2 - c_2)^2 + + + Default constant values are :math:`c = (11.0, 7.0)`. 
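+
+    For reference, the gradient implemented in ``grad`` is
+
+    .. math::
+        \frac{\partial f}{\partial x_1}=4x_1(x_1^2+x_2-c_1)+2(x_1+x_2^2-c_2)
+
+        \frac{\partial f}{\partial x_2}=2(x_1^2+x_2-c_1)+4x_2(x_1+x_2^2-c_2)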
+ + """ + def __init__(self, c1=11.0, c2=7.0, name="HimmelBlau"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-6, 6], [-6, 6]])) + + def __call__(self, x): + return (((x[:, 0]**2+x[:, 1])-self.c1)**2+((x[:, 0]+x[:, 1]**2)-self.c2)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((x[:, 0]**2+x[:, 1])-self.c1)**2*(2*((1/((x[:, 0]**2+x[:, 1])-self.c1))*(x[:, 0]**2*(2*(1/x[:, 0]))))))+(((x[:, 0]+x[:, 1]**2)-self.c2)**2*(2*(1/((x[:, 0]+x[:, 1]**2)-self.c2)))) + grad[:, 0, 1] = (((x[:, 0]**2+x[:, 1])-self.c1)**2*(2*(1/((x[:, 0]**2+x[:, 1])-self.c1))))+(((x[:, 0]+x[:, 1]**2)-self.c2)**2*(2*((1/((x[:, 0]+x[:, 1]**2)-self.c2))*(x[:, 1]**2*(2*(1/x[:, 1])))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Hosaki(Function): + r"""Hosaki function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_H.html#go_benchmark.Hosaki] + + .. math:: + f(x)=\left ( c_1 - c_2x_1 + c_3x_1^2 - c_4x_1^3 + c_5x_1^4 \right )x_2^2e^{-x_1} + + + Default constant values are :math:`c = (1.0, 8.0, 7.0, 8./3., 0.25)`. + + """ + def __init__(self, c1=1.0, c2=8.0, c3=7.0, c4=8./3., c5=0.25, name="Hosaki"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[0, 10], [0, 10]])) + + def __call__(self, x): + return ((((((self.c1-(self.c2*x[:, 0]))+(self.c3*x[:, 0]**2))-(self.c4*x[:, 0]**3))+(self.c5*x[:, 0]**4))*x[:, 1]**2)*np.exp(-x[:, 0])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((((((-self.c2)+(self.c3*(x[:, 0]**2*(2*(1/x[:, 0])))))-(self.c4*(x[:, 0]**3*(3*(1/x[:, 0])))))+(self.c5*(x[:, 0]**4*(4*(1/x[:, 0])))))*x[:, 1]**2)*np.exp(-x[:, 0]))+((((((self.c1-(self.c2*x[:, 0]))+(self.c3*x[:, 0]**2))-(self.c4*x[:, 0]**3))+(self.c5*x[:, 0]**4))*x[:, 1]**2)*(-np.exp(-x[:, 0]))) + grad[:, 0, 1] = (((((self.c1-(self.c2*x[:, 0]))+(self.c3*x[:, 0]**2))-(self.c4*x[:, 0]**3))+(self.c5*x[:, 0]**4))*(x[:, 1]**2*(2*(1/x[:, 1]))))*np.exp(-x[:, 0]) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + + +class Keane(Function): + r""" + Keane function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_K.html#go_benchmark.Keane] + + A multimodal minimization function + + .. 
math:: + f(x)=\frac{\sin^2(x_1 - x_2)\sin^2(x_1 + x_2)}{\sqrt{x_1^2 + x_2^2}} + + """ + def __init__(self, name="Keane"): + super().__init__(name=name) + + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[1.e-8, 10], [1.e-8, 10]])) + + def __call__(self, x): + return ((np.sin(x[:, 0]-x[:, 1])**2*np.sin(x[:, 0]+x[:, 1])**2)/np.sqrt(x[:, 0]**2+x[:, 1]**2)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = 2.*np.sin(x[:, 0]-x[:, 1])*np.sin(x[:, 0]+x[:, 1])*np.sin(2.*x[:, 0])/np.sqrt(x[:, 0]**2+x[:, 1]**2) - self.__call__(x)[:, 0]*x[:, 0]/(x[:, 0]**2 + x[:, 1]**2) + grad[:, 0, 1] = - 2.*np.sin(x[:, 0]-x[:, 1])*np.sin(x[:, 0]+x[:, 1])*np.sin(2.*x[:,1 ])/np.sqrt(x[:, 0]**2+x[:, 1]**2) - self.__call__(x)[:, 0]*x[:, 1]/(x[:, 0]**2 + x[:, 1]**2) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Leon(Function): + r"""Leon function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_L.html#go_benchmark.Leon] + + .. math:: + f(x)= \left(1 - x_{1}\right)^{2} + c_1 \left(x_{2} - x_{1}^{2} \right)^{2} + + + Default constant values are :math:`c = 100`. + + """ + def __init__(self, c1=100, name="Leon"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-1.2, 1.2], [-1.2, 1.2]])) + + def __call__(self, x): + return ((1-x[:, 0])**2+(self.c1*(x[:, 1]-x[:, 0]**2)**2)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((1-x[:, 0])**2*(2*(-(1/(1-x[:, 0])))))+(self.c1*((x[:, 1]-x[:, 0]**2)**2*(2*((1/(x[:, 1]-x[:, 0]**2))*(-(x[:, 0]**2*(2*(1/x[:, 0])))))))) + grad[:, 0, 1] = self.c1*((x[:, 1]-x[:, 0]**2)**2*(2*(1/(x[:, 1]-x[:, 0]**2)))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Levy13(Function): + r"""Levy13 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_L.html#go_benchmark.Levy13] + + .. math:: + f(x)=\left(x_{1} -c_1\right)^{2} \left[\sin^{2}\left(c_2 \pi x_{2}\right) + c_3\right] + \left(x_{2} -c_4\right)^{2} \left[\sin^{2}\left(c_5 \pi x_{2}\right) + c_6\right] + \sin^{2}\left(c_7 \pi x_{1}\right) + + + Default constant values are :math:`c = (1.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0)`. 
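+
+    For reference, differentiating the expression above gives the gradient
+    implemented in ``grad``:
+
+    .. math::
+        \frac{\partial f}{\partial x_1}=2(x_1-c_1)\left[\sin^2(c_2\pi x_2)+c_3\right]+c_7\pi\sin(2c_7\pi x_1)
+
+        \frac{\partial f}{\partial x_2}=c_2\pi(x_1-c_1)^2\sin(2c_2\pi x_2)+2(x_2-c_4)\left[\sin^2(c_5\pi x_2)+c_6\right]+c_5\pi(x_2-c_4)^2\sin(2c_5\pi x_2)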
+ + """ + def __init__(self, c1=1.0, c2=3.0, c3=1.0, c4=1.0, c5=2.0, c6=1.0, c7=3.0, name="Levy13"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7 = c1, c2, c3, c4, c5, c6, c7 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return ((((x[:, 0]-self.c1)**2*(np.sin((self.c2*np.pi)*x[:, 1])**2+self.c3))+((x[:, 1]-self.c4)**2*(np.sin((self.c5*np.pi)*x[:, 1])**2+self.c6)))+np.sin((self.c7*np.pi)*x[:, 0])**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((x[:, 0]-self.c1)**2*(2*(1/(x[:, 0]-self.c1))))*(np.sin((self.c2*np.pi)*x[:, 1])**2+self.c3))+(np.sin((self.c7*np.pi)*x[:, 0])**2*(2*((1/np.sin((self.c7*np.pi)*x[:, 0]))*(np.cos((self.c7*np.pi)*x[:, 0])*(self.c7*np.pi))))) + grad[:, 0, 1] = ((x[:, 0]-self.c1)**2*(np.sin((self.c2*np.pi)*x[:, 1])**2*(2*((1/np.sin((self.c2*np.pi)*x[:, 1]))*(np.cos((self.c2*np.pi)*x[:, 1])*(self.c2*np.pi))))))+((((x[:, 1]-self.c4)**2*(2*(1/(x[:, 1]-self.c4))))*(np.sin((self.c5*np.pi)*x[:, 1])**2+self.c6))+((x[:, 1]-self.c4)**2*(np.sin((self.c5*np.pi)*x[:, 1])**2*(2*((1/np.sin((self.c5*np.pi)*x[:, 1]))*(np.cos((self.c5*np.pi)*x[:, 1])*(self.c5*np.pi))))))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Lim(Function): + r"""Lim function + + Generalized nonpolynomial trigonometric 2d function + + .. math:: + f(x)=a((b+cx_1\sin(dx_1))(f+e^{gx_2})+h) + + + Default constant values are :math:`c = (1/6, 30., 5., 5., 4., -5., -100.)`. + + """ + def __init__(self, c1=1/6, c2=30., c3=5., c4=5., c5=4., c6=-5., c7=-100., name="Lim"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7 = c1, c2, c3, c4, c5, c6, c7 + self.dim = 2 + self.outdim = 1 + + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 1.])) + + def __call__(self, x): + return (self.c1 * ((self.c2 + self.c3 * x[:, 0] * np.sin(self.c4 * x[:, 0])) * (self.c5 + np.exp(self.c6 * x[:, 1])) + self.c7))[:, np.newaxis] + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + x1, x2 = x[:, 0], x[:, 1] + + grad[:, 0, 0] = self.c1 * (self.c3 * (np.sin(self.c4 * x1) + self.c4 * x1 * np.cos(self.c4 * x1)) * (self.c5 + np.exp(self.c6 * x2))) + grad[:, 0, 1] = self.c1 * ((self.c2 + self.c3 * x1 * np.sin(self.c4 * x1)) * self.c6 * np.exp(self.c6 * x2)) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Matyas(Function): + r"""Matyas function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Matyas] + + .. math:: + f(x)=c_1(x_1^2 + x_2^2) - c_2x_1x_2 + + + Default constant values are :math:`c = (0.26, 0.48)`. 
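+
+    For reference, the gradient implemented in ``grad`` is
+
+    .. math::
+        \frac{\partial f}{\partial x_1}=2c_1x_1-c_2x_2,\qquad
+        \frac{\partial f}{\partial x_2}=2c_1x_2-c_2x_1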
+ + """ + def __init__(self, c1=0.26, c2=0.48, name="Matyas"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return ((self.c1*(x[:, 0]**2+x[:, 1]**2))-((self.c2*x[:, 0])*x[:, 1])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (self.c1*(x[:, 0]**2*(2*(1/x[:, 0]))))-(self.c2*x[:, 1]) + grad[:, 0, 1] = (self.c1*(x[:, 1]**2*(2*(1/x[:, 1]))))-(self.c2*x[:, 0]) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Mishra03(Function): + r""" + Mishra03 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Mishra03] + + A multimodal minimization function + + .. math:: + f(x)=\sqrt{|\cos{\sqrt{x_1^2 + x_2^2}}|} + c_1(x_1 + x_2) + + + Default constant value is :math:`c = 0.01`. + """ + def __init__(self, c1=0.01, name="Mishra03"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (np.sqrt(np.abs(np.cos(np.sqrt(x[:, 0]**2+x[:, 1]**2))))+self.c1*(x[:, 0]+x[:, 1])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (-0.5*x[:,0]/(self.__call__(x)[:,0]-self.c1*(x[:, 0]+x[:, 1])))*np.sign(np.cos(np.sqrt(x[:, 0]**2+x[:, 1]**2)))*np.sin(np.sqrt(x[:, 0]**2+x[:, 1]**2))/np.sqrt(x[:, 0]**2+x[:, 1]**2)+self.c1 + grad[:, 0, 1] = (-0.5*x[:,1]/(self.__call__(x)[:,0]-self.c1*(x[:, 0]+x[:, 1])))*np.sign(np.cos(np.sqrt(x[:, 0]**2+x[:, 1]**2)))*np.sin(np.sqrt(x[:, 0]**2+x[:, 1]**2))/np.sqrt(x[:, 0]**2+x[:, 1]**2)+self.c1 + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class Mishra04(Function): + r""" + Mishra04 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Mishra04] + + A multimodal minimization function + + .. math:: + f(x)=\sqrt{|\sin{\sqrt{x_1^2 + x_2^2}}|} + c_1(x_1 + x_2) + + + Default constant value is :math:`c = 0.01`. 
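+
+    For reference, with :math:`r=\sqrt{x_1^2+x_2^2}` the gradient implemented in
+    ``grad`` is
+
+    .. math::
+        \frac{\partial f}{\partial x_i}=\frac{x_i\,\mathrm{sign}(\sin r)\cos r}{2r\sqrt{|\sin r|}}+c_1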
+ """ + def __init__(self, c1=0.01, name="Mishra04"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (np.sqrt(np.abs(np.sin(np.sqrt(x[:, 0]**2+x[:, 1]**2))))+self.c1*(x[:, 0]+x[:, 1])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (0.5*x[:,0]/(self.__call__(x)[:,0]-self.c1*(x[:, 0]+x[:, 1])))*np.sign(np.sin(np.sqrt(x[:, 0]**2+x[:, 1]**2)))*np.cos(np.sqrt(x[:, 0]**2+x[:, 1]**2))/np.sqrt(x[:, 0]**2+x[:, 1]**2)+self.c1 + grad[:, 0, 1] = (0.5*x[:,1]/(self.__call__(x)[:,0]-self.c1*(x[:, 0]+x[:, 1])))*np.sign(np.sin(np.sqrt(x[:, 0]**2+x[:, 1]**2)))*np.cos(np.sqrt(x[:, 0]**2+x[:, 1]**2))/np.sqrt(x[:, 0]**2+x[:, 1]**2)+self.c1 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Mishra05(Function): + r""" + Mishra05 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Mishra05] + + A multimodal minimization function + + .. math:: + f(x)=\left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2) + \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2 + c_1(x_1 + x_2) + + + Default constant value is :math:`c = 0.01`. + + """ + def __init__(self, c1=0.01, name="Mishra05"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2+(self.c1*(x[:, 0]+x[:, 1]))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2*(2*((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0]))*(((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 0])))))))))+(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 1])))*np.cos(x[:, 0])))))))))+1))))+self.c1 + grad[:, 0, 1] = (((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2*(2*((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0]))*((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 1])))))))))+(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 1])))*np.cos(x[:, 1]))))))))))))+self.c1 + + return grad + + 
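+# The chain-rule grad() expressions in this module are long and hard to audit
+# by eye.  A quick sanity check is a central finite-difference comparison; the
+# sketch below is illustrative only (it assumes this module is importable as
+# pytuq.func.bench2d) and is left commented out so nothing runs at import time:
+#
+#     import numpy as np
+#     fcn = Matyas()
+#     x = np.array([[0.3, -1.2]])
+#     h = 1e-6
+#     fd = np.array([(fcn(x + h*np.eye(2)[j]) - fcn(x - h*np.eye(2)[j]))[0, 0] / (2*h)
+#                    for j in range(2)])
+#     assert np.allclose(fcn.grad(x)[0, 0], fd, rtol=1e-4)
+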
+################################################################################ +################################################################################ +################################################################################ + +class Mishra06(Function): + r""" + Mishra06 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Mishra06] + + A multimodal minimization function + + .. math:: + f(x)=-\log{\left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2) - \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2} + c_1 \left[(x_1 -c_2)^2 + (x_2 - c_3)^2 \right] + + + Default constant values are :math:`c = (0.01, 1.0, 1.0)`. + + """ + def __init__(self, c1=0.01, c2=1.0, c3=1.0, name="Mishra06"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return ((-np.log(((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2))+(self.c1*((x[:, 0]-self.c2)**2+(x[:, 1]-self.c3)**2))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (-((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2)*(((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2*(2*((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0]))*(((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 0])))))))))-(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 1])))*np.cos(x[:, 0])))))))))+1))))))+(self.c1*((x[:, 0]-self.c2)**2*(2*(1/(x[:, 0]-self.c2))))) + grad[:, 0, 1] = (-((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2)*(((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0])**2*(2*((1/((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2-np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)+x[:, 0]))*((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 1])))))))))-(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 1])))*np.cos(x[:, 1]))))))))))))))+(self.c1*((x[:, 1]-self.c3)**2*(2*(1/(x[:, 1]-self.c3))))) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class McCormick(Function): + r"""McCormick function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.McCormick] + + .. 
math:: + f(x)=- x_{1} + c_1 x_{2} + \left(x_{1} - x_{2}\right)^{2} + \sin\left(x_{1} + x_{2}\right) + c_2 + + + Default constant values are :math:`c = (2.0, 1.0)`. + + """ + def __init__(self, c1=2.0, c2=1.0, name="McCormick"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-1.5, 4], [-1.5, 4]])) + + def __call__(self, x): + return (((((-x[:, 0])+(self.c1*x[:, 1]))+(x[:, 0]-x[:, 1])**2)+np.sin(x[:, 0]+x[:, 1]))+self.c2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((-1)+((x[:, 0]-x[:, 1])**2*(2*(1/(x[:, 0]-x[:, 1])))))+np.cos(x[:, 0]+x[:, 1]) + grad[:, 0, 1] = (self.c1+((x[:, 0]-x[:, 1])**2*(2*(-(1/(x[:, 0]-x[:, 1]))))))+np.cos(x[:, 0]+x[:, 1]) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class NewFunction03(Function): + r"""NewFunction03 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_N.html#go_benchmark.NewFunction03] + + .. math:: + f(x)=c_1 x_{1} + c_2 x_{2} + \left[x_{1} + \sin^{2}\left[\left(\cos\left(x_{1}\right) + \cos\left(x_{2}\right)\right)^{2}\right] + \cos^{2}\left[\left(\sin\left(x_{1}\right) + \sin\left(x_{2}\right)\right)^{2}\right]\right]^{2} + + + Default constant values are :math:`c = (0.01, 0.1)`. + + """ + def __init__(self, c1=0.01, c2=0.1, name="NewFunction03"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (((self.c1*x[:, 0])+(self.c2*x[:, 1]))+((x[:, 0]+np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2)+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = self.c1+(((x[:, 0]+np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2)+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)**2*(2*((1/((x[:, 0]+np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2)+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2))*((1+(np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 0]))))))))))+(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 1])))*np.cos(x[:, 0])))))))))))) + grad[:, 0, 1] = self.c2+(((x[:, 0]+np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2)+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2)**2*(2*((1/((x[:, 0]+np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2)+np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2))*((np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)**2*(2*((1/np.sin((np.cos(x[:, 0])+np.cos(x[:, 1]))**2))*(np.cos((np.cos(x[:, 0])+np.cos(x[:, 1]))**2)*((np.cos(x[:, 0])+np.cos(x[:, 1]))**2*(2*((1/(np.cos(x[:, 0])+np.cos(x[:, 1])))*(-np.sin(x[:, 1])))))))))+(np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2)**2*(2*((1/np.cos((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((-np.sin((np.sin(x[:, 0])+np.sin(x[:, 1]))**2))*((np.sin(x[:, 0])+np.sin(x[:, 1]))**2*(2*((1/(np.sin(x[:, 0])+np.sin(x[:, 
1])))*np.cos(x[:, 1])))))))))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Parsopoulos(Function): + r"""Parsopoulos function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Parsopoulos] + + .. math:: + f(x)=\cos(x_1)^2 + \sin(x_2)^2 + + """ + def __init__(self, name="Parsopoulos"): + super().__init__(name=name) + + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5, 5], [-5, 5]])) + + def __call__(self, x): + return (np.cos(x[:, 0])**2+np.sin(x[:, 1])**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = np.cos(x[:, 0])**2*(2*((1/np.cos(x[:, 0]))*(-np.sin(x[:, 0])))) + grad[:, 0, 1] = np.sin(x[:, 1])**2*(2*((1/np.sin(x[:, 1]))*np.cos(x[:, 1]))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Price01(Function): + r"""Price01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Price01] + + .. math:: + f(x)=(\abs{ x_1 } - c_1)^2 + (\abs{ x_2 } - c_2)^2 + + + Default constant values are :math:`c = (5, 5)`. + + """ + def __init__(self, c1=5, c2=5, name="Price01"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-500, 500], [-500, 500]])) + + def __call__(self, x): + return ((np.abs(x[:, 0])-self.c1)**2+(np.abs(x[:, 1])-self.c2)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (np.abs(x[:, 0])-self.c1)**2*(2*((1/(np.abs(x[:, 0])-self.c1))*np.sign(x[:, 0]))) + grad[:, 0, 1] = (np.abs(x[:, 1])-self.c2)**2*(2*((1/(np.abs(x[:, 1])-self.c2))*np.sign(x[:, 1]))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Price02(Function): + r"""Price02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Price02] + + .. math:: + f(x)=c_1 + \sin^2(x_1) + \sin^2(x_2) - c_2e^{(-x_1^2 - x_2^2)} + + + Default constant values are :math:`c = (1.0, 0.1)`. 
+ + """ + def __init__(self, c1=1.0, c2=0.1, name="Price02"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return (((self.c1+np.sin(x[:, 0])**2)+np.sin(x[:, 1])**2)-(self.c2*np.exp((-x[:, 0]**2)-x[:, 1]**2))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (np.sin(x[:, 0])**2*(2*((1/np.sin(x[:, 0]))*np.cos(x[:, 0]))))-(self.c2*(np.exp((-x[:, 0]**2)-x[:, 1]**2)*(-(x[:, 0]**2*(2*(1/x[:, 0])))))) + grad[:, 0, 1] = (np.sin(x[:, 1])**2*(2*((1/np.sin(x[:, 1]))*np.cos(x[:, 1]))))-(self.c2*(np.exp((-x[:, 0]**2)-x[:, 1]**2)*(-(x[:, 1]**2*(2*(1/x[:, 1])))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Price03(Function): + r"""Price03 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Price03] + + .. math:: + f(x)=c_1(x_2 - x_1^2)^2 + \left[c_2(x_2 - c_3)^2 - x_1 - c_4 \right]^2 + + + Default constant values are :math:`c = (100, 6.4, 0.5, 0.6)`. + + """ + def __init__(self, c1=100, c2=6.4, c3=0.5, c4=0.6, name="Price03"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-50, 50], [-50, 50]])) + + def __call__(self, x): + return ((self.c1*(x[:, 1]-x[:, 0]**2)**2)+(((self.c2*(x[:, 1]-self.c3)**2)-x[:, 0])-self.c4)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (self.c1*((x[:, 1]-x[:, 0]**2)**2*(2*((1/(x[:, 1]-x[:, 0]**2))*(-(x[:, 0]**2*(2*(1/x[:, 0]))))))))+((((self.c2*(x[:, 1]-self.c3)**2)-x[:, 0])-self.c4)**2*(2*(-(1/(((self.c2*(x[:, 1]-self.c3)**2)-x[:, 0])-self.c4))))) + grad[:, 0, 1] = (self.c1*((x[:, 1]-x[:, 0]**2)**2*(2*(1/(x[:, 1]-x[:, 0]**2)))))+((((self.c2*(x[:, 1]-self.c3)**2)-x[:, 0])-self.c4)**2*(2*((1/(((self.c2*(x[:, 1]-self.c3)**2)-x[:, 0])-self.c4))*(self.c2*((x[:, 1]-self.c3)**2*(2*(1/(x[:, 1]-self.c3)))))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Price04(Function): + r"""Price04 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Price04] + + .. math:: + f(x)=(c_1x_1^3x_2 - x_2^3)^2 + (c_2x_1 - x_2^2 + x_2)^2 + + + Default constant values are :math:`c = (2, 6)`. 
+ + """ + def __init__(self, c1=2, c2=6, name="Price04"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-50, 50], [-50, 50]])) + + def __call__(self, x): + return ((((self.c1*x[:, 0]**3)*x[:, 1])-x[:, 1]**3)**2+(((self.c2*x[:, 0])-x[:, 1]**2)+x[:, 1])**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((((self.c1*x[:, 0]**3)*x[:, 1])-x[:, 1]**3)**2*(2*((1/(((self.c1*x[:, 0]**3)*x[:, 1])-x[:, 1]**3))*((self.c1*(x[:, 0]**3*(3*(1/x[:, 0]))))*x[:, 1]))))+((((self.c2*x[:, 0])-x[:, 1]**2)+x[:, 1])**2*(2*((1/(((self.c2*x[:, 0])-x[:, 1]**2)+x[:, 1]))*self.c2))) + grad[:, 0, 1] = ((((self.c1*x[:, 0]**3)*x[:, 1])-x[:, 1]**3)**2*(2*((1/(((self.c1*x[:, 0]**3)*x[:, 1])-x[:, 1]**3))*((self.c1*x[:, 0]**3)-(x[:, 1]**3*(3*(1/x[:, 1])))))))+((((self.c2*x[:, 0])-x[:, 1]**2)+x[:, 1])**2*(2*((1/(((self.c2*x[:, 0])-x[:, 1]**2)+x[:, 1]))*((-(x[:, 1]**2*(2*(1/x[:, 1]))))+1)))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Quadratic(Function): + r"""Quadratic function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_Q.html#go_benchmark.Quadratic] + + .. math:: + f(x)=-3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2 + 203.64x_2^2 + 182.25x_1x_2 + + """ + def __init__(self, name="Quadratic"): + super().__init__(name=name) + + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-10, 10], [-10, 10]])) + + def __call__(self, x): + return ((((((-3803)-(138*x[:, 0]))-(232*x[:, 1]))+(128*x[:, 0]**2))+(203*x[:, 1]**2))+((182*x[:, 0])*x[:, 1])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((-138)+(128*(x[:, 0]**2*(2*(1/x[:, 0])))))+(182*x[:, 1]) + grad[:, 0, 1] = ((-232)+(203*(x[:, 1]**2*(2*(1/x[:, 1])))))+(182*x[:, 0]) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Quadratic2d(Function): + """2d Quadratic function + + .. math:: + f(x)=0.5 (x - c)^T H (x - c) + + where :math:`H` is the Hessian matrix, and :math:`c` is the center. 
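+
+    A minimal usage sketch (assuming the installed module path is
+    ``pytuq.func.bench2d``)::
+
+        import numpy as np
+        from pytuq.func.bench2d import Quadratic2d
+
+        fcn = Quadratic2d(center=[1., -1.], hess=[[2., 0.], [0., 4.]])
+        x = np.array([[1., -1.], [0., 0.]])
+        y = fcn(x)       # values, shape (2, 1); zero at the center
+        g = fcn.grad(x)  # gradients H (x - c), shape (2, 1, 2)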
+ + """ + def __init__(self, center=[0., 0.], hess=[[1., 0.], [0., 1.]], name='Quadratic2d'): + super().__init__() + + self.center = np.array(center, dtype=float) + self.hess = np.array(hess, dtype=float) + self.cov = np.linalg.inv(self.hess) + + f = 4.0 + domain = np.tile(self.center.reshape(-1,1), (1,2)) + stds = np.sqrt(np.diag(self.cov)) + domain[:,0] -= f * stds + domain[:,1] += f * stds + + + self.setDimDom(domain=domain) + self.name = name + self.outdim = 1 + + return + + def __call__(self, x): + self.checkDim(x) + + nsam = x.shape[0] + yy = np.empty(nsam,) + for i in range(nsam): + yy[i] = 0.5 * np.dot(x[i, :]-self.center, np.dot(self.hess, x[i, :]-self.center)) + + + return yy.reshape(-1,1) + + def grad(self, x): + + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + for j in range(self.dim): + for i in range(self.dim): + grad[:, 0, j] += self.hess[j, i] * (x[:, i] - self.center[i]) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class RosenbrockModified(Function): + r"""RosenbrockModified function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_R.html#go_benchmark.RosenbrockModified] + + .. math:: + f(x)=c_1 + c_2(x_2 - x_1^2)^2 + (c_3 - x_1)^2 - c_4 e^{-\frac{(x_1+1)^2 + (x_2 + 1)^2}{c_5}} + + + Default constant values are :math:`c = (74, 100, 1, 400, 0.1)`. + + """ + def __init__(self, c1=74, c2=100, c3=1, c4=400, c5=0.1, name="RosenbrockModified"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-2, 2], [-2, 2]])) + + def __call__(self, x): + return (((self.c1+(self.c2*(x[:, 1]-x[:, 0]**2)**2))+(self.c3-x[:, 0])**2)-(self.c4*np.exp(-(((x[:, 0]+1)**2+(x[:, 1]+1)**2)/self.c5)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((self.c2*((x[:, 1]-x[:, 0]**2)**2*(2*((1/(x[:, 1]-x[:, 0]**2))*(-(x[:, 0]**2*(2*(1/x[:, 0]))))))))+((self.c3-x[:, 0])**2*(2*(-(1/(self.c3-x[:, 0]))))))-(self.c4*(np.exp(-(((x[:, 0]+1)**2+(x[:, 1]+1)**2)/self.c5))*(-((((x[:, 0]+1)**2*(2*(1/(x[:, 0]+1))))*self.c5)/self.c5**2)))) + grad[:, 0, 1] = (self.c2*((x[:, 1]-x[:, 0]**2)**2*(2*(1/(x[:, 1]-x[:, 0]**2)))))-(self.c4*(np.exp(-(((x[:, 0]+1)**2+(x[:, 1]+1)**2)/self.c5))*(-((((x[:, 1]+1)**2*(2*(1/(x[:, 1]+1))))*self.c5)/self.c5**2)))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class RotatedEllipse01(Function): + r"""RotatedEllipse01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_R.html#go_benchmark.RotatedEllipse01] + + .. math:: + f(x)=c_1x_1^2 - c_2 x_1x_2 + c_3x_2^2 + + + Default constant values are :math:`c = (7, 10.392304845413264, 13)`. 
+ + """ + def __init__(self, c1=7, c2=10.392304845413264, c3=13, name="RotatedEllipse01"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-500, 500], [-500, 500]])) + + def __call__(self, x): + return (((self.c1*x[:, 0]**2)-((self.c2*x[:, 0])*x[:, 1]))+(self.c3*x[:, 1]**2)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (self.c1*(x[:, 0]**2*(2*(1/x[:, 0]))))-(self.c2*x[:, 1]) + grad[:, 0, 1] = (-(self.c2*x[:, 0]))+(self.c3*(x[:, 1]**2*(2*(1/x[:, 1])))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class RotatedEllipse02(Function): + r"""RotatedEllipse02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_R.html#go_benchmark.RotatedEllipse02] + + .. math:: + f(x)=x_1^2 - x_1x_2 + x_2^2 + + """ + def __init__(self, name="RotatedEllipse02"): + super().__init__(name=name) + + + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-500, 500], [-500, 500]])) + + def __call__(self, x): + return ((x[:, 0]**2-(x[:, 0]*x[:, 1]))+x[:, 1]**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (x[:, 0]**2*(2*(1/x[:, 0])))-x[:, 1] + grad[:, 0, 1] = (-x[:, 0])+(x[:, 1]**2*(2*(1/x[:, 1]))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Schaffer01(Function): + r"""Schaffer01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_S.html#go_benchmark.Schaffer01] + + .. math:: + f(x)=c_1 + \frac{\sin^2 (x_1^2 + x_2^2)^2 - c_2}{c_3 + c_4(x_1^2 + x_2^2)^2} + + + Default constant values are :math:`c = (0.5, 0.5, 1, 0.001)`. 
+ + """ + def __init__(self, c1=0.5, c2=0.5, c3=1, c4=0.001, name="Schaffer01"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-100, 100], [-100, 100]])) + + def __call__(self, x): + return (self.c1+((np.sin(x[:, 0]**2+x[:, 1]**2)**2**2-self.c2)/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((np.sin(x[:, 0]**2+x[:, 1]**2)**2**2*(2*((1/np.sin(x[:, 0]**2+x[:, 1]**2)**2)*(np.sin(x[:, 0]**2+x[:, 1]**2)**2*(2*((1/np.sin(x[:, 0]**2+x[:, 1]**2))*(np.cos(x[:, 0]**2+x[:, 1]**2)*(x[:, 0]**2*(2*(1/x[:, 0]))))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.sin(x[:, 0]**2+x[:, 1]**2)**2**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 0]**2*(2*(1/x[:, 0])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + grad[:, 0, 1] = (((np.sin(x[:, 0]**2+x[:, 1]**2)**2**2*(2*((1/np.sin(x[:, 0]**2+x[:, 1]**2)**2)*(np.sin(x[:, 0]**2+x[:, 1]**2)**2*(2*((1/np.sin(x[:, 0]**2+x[:, 1]**2))*(np.cos(x[:, 0]**2+x[:, 1]**2)*(x[:, 1]**2*(2*(1/x[:, 1]))))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.sin(x[:, 0]**2+x[:, 1]**2)**2**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 1]**2*(2*(1/x[:, 1])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Schaffer02(Function): + r"""Schaffer02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_S.html#go_benchmark.Schaffer02] + + .. math:: + f(x)=c_1 + \frac{\sin^2 (x_1^2 - x_2^2)^2 - c_2}{c_3 + c_4(x_1^2 + x_2^2)^2} + + + Default constant values are :math:`c = (0.5, 0.5, 1, 0.001)`. 
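+
+    The constants can be overridden through the constructor; a minimal usage
+    sketch (assuming the installed module path is ``pytuq.func.bench2d``)::
+
+        import numpy as np
+        from pytuq.func.bench2d import Schaffer02
+
+        fcn = Schaffer02(c4=0.01)        # override one default constant
+        x = np.array([[1.0, -2.0], [0.5, 0.5]])
+        y = fcn(x)       # values, shape (2, 1)
+        g = fcn.grad(x)  # gradients, shape (2, 1, 2)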
+ + """ + def __init__(self, c1=0.5, c2=0.5, c3=1, c4=0.001, name="Schaffer02"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-100, 100], [-100, 100]])) + + def __call__(self, x): + return (self.c1+((np.sin(x[:, 0]**2-x[:, 1]**2)**2**2-self.c2)/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((np.sin(x[:, 0]**2-x[:, 1]**2)**2**2*(2*((1/np.sin(x[:, 0]**2-x[:, 1]**2)**2)*(np.sin(x[:, 0]**2-x[:, 1]**2)**2*(2*((1/np.sin(x[:, 0]**2-x[:, 1]**2))*(np.cos(x[:, 0]**2-x[:, 1]**2)*(x[:, 0]**2*(2*(1/x[:, 0]))))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.sin(x[:, 0]**2-x[:, 1]**2)**2**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 0]**2*(2*(1/x[:, 0])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + grad[:, 0, 1] = (((np.sin(x[:, 0]**2-x[:, 1]**2)**2**2*(2*((1/np.sin(x[:, 0]**2-x[:, 1]**2)**2)*(np.sin(x[:, 0]**2-x[:, 1]**2)**2*(2*((1/np.sin(x[:, 0]**2-x[:, 1]**2))*(np.cos(x[:, 0]**2-x[:, 1]**2)*(-(x[:, 1]**2*(2*(1/x[:, 1])))))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.sin(x[:, 0]**2-x[:, 1]**2)**2**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 1]**2*(2*(1/x[:, 1])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Schaffer04(Function): + r"""Schaffer04 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_S.html#go_benchmark.Schaffer04] + + .. math:: + f(x)=c_1 + \frac{\cos^2 \left( \sin(x_1^2 - x_2^2) \right ) - c_2}{c_3 + c_4(x_1^2 + x_2^2)^2} + + + Default constant values are :math:`c = (0.5, 0.5, 1, 0.001)`. 
+ + """ + def __init__(self, c1=0.5, c2=0.5, c3=1, c4=0.001, name="Schaffer04"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-100, 100], [-100, 100]])) + + def __call__(self, x): + return (self.c1+((np.cos(np.sin(x[:, 0]**2-x[:, 1]**2))**2-self.c2)/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((np.cos(np.sin(x[:, 0]**2-x[:, 1]**2))**2*(2*((1/np.cos(np.sin(x[:, 0]**2-x[:, 1]**2)))*((-np.sin(np.sin(x[:, 0]**2-x[:, 1]**2)))*(np.cos(x[:, 0]**2-x[:, 1]**2)*(x[:, 0]**2*(2*(1/x[:, 0]))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.cos(np.sin(x[:, 0]**2-x[:, 1]**2))**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 0]**2*(2*(1/x[:, 0])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + grad[:, 0, 1] = (((np.cos(np.sin(x[:, 0]**2-x[:, 1]**2))**2*(2*((1/np.cos(np.sin(x[:, 0]**2-x[:, 1]**2)))*((-np.sin(np.sin(x[:, 0]**2-x[:, 1]**2)))*(np.cos(x[:, 0]**2-x[:, 1]**2)*(-(x[:, 1]**2*(2*(1/x[:, 1])))))))))*(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2)))-((np.cos(np.sin(x[:, 0]**2-x[:, 1]**2))**2-self.c2)*(self.c4*((x[:, 0]**2+x[:, 1]**2)**2*(2*((1/(x[:, 0]**2+x[:, 1]**2))*(x[:, 1]**2*(2*(1/x[:, 1])))))))))/(self.c3+(self.c4*(x[:, 0]**2+x[:, 1]**2)**2))**2 + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class Schwefel36(Function): + r"""Schwefel36 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_S.html#go_benchmark.Schwefel36] + + .. math:: + f(x)=-x_1x_2(c_1 - c_2x_1 - c_3x_2) + + + Default constant values are :math:`c = (72, 2, 2)`. + + """ + def __init__(self, c1=72, c2=2, c3=2, name="Schwefel36"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[0, 500], [0, 500]])) + + def __call__(self, x): + return (((-x[:, 0])*x[:, 1])*((self.c1-(self.c2*x[:, 0]))-(self.c3*x[:, 1]))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((-x[:, 1])*((self.c1-(self.c2*x[:, 0]))-(self.c3*x[:, 1])))+(((-x[:, 0])*x[:, 1])*(-self.c2)) + grad[:, 0, 1] = ((-x[:, 0])*((self.c1-(self.c2*x[:, 0]))-(self.c3*x[:, 1])))+(((-x[:, 0])*x[:, 1])*(-self.c3)) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class SixHumpCamel(Function): + r"""SixHumpCamel function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_S.html#go_benchmark.SixHumpCamel] + + .. math:: + f(x)=c_1x_1^2+x_1x_2-c_2x_2^2-c_3x_1^4+c_4x_2^4+c_5x_1^6 + + + Default constant values are :math:`c = (4, 4, 2.1, 4, 1./3.)`. 
+ + """ + def __init__(self, c1=4, c2=4, c3=2.1, c4=4, c5=1./3., name="SixHumpCamel"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5, 5], [-5, 5]])) + + def __call__(self, x): + return ((((((self.c1*x[:, 0]**2)+(x[:, 0]*x[:, 1]))-(self.c2*x[:, 1]**2))-(self.c3*x[:, 0]**4))+(self.c4*x[:, 1]**4))+(self.c5*x[:, 0]**6)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((self.c1*(x[:, 0]**2*(2*(1/x[:, 0]))))+x[:, 1])-(self.c3*(x[:, 0]**4*(4*(1/x[:, 0])))))+(self.c5*(x[:, 0]**6*(6*(1/x[:, 0])))) + grad[:, 0, 1] = (x[:, 0]-(self.c2*(x[:, 1]**2*(2*(1/x[:, 1])))))+(self.c4*(x[:, 1]**4*(4*(1/x[:, 1])))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class ThreeHumpCamel(Function): + r"""ThreeHumpCamel function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_T.html#go_benchmark.ThreeHumpCamel] + + .. math:: + f(x)=c_1x_1^2 - c_2x_1^4 + \frac{x_1^6}{c_3} + x_1x_2 + x_2^2 + + + Default constant values are :math:`c = (2, 1.05, 6)`. + + """ + def __init__(self, c1=2, c2=1.05, c3=6, name="ThreeHumpCamel"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5, 5], [-5, 5]])) + + def __call__(self, x): + return (((((self.c1*x[:, 0]**2)-(self.c2*x[:, 0]**4))+(x[:, 0]**6/self.c3))+(x[:, 0]*x[:, 1]))+x[:, 1]**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((self.c1*(x[:, 0]**2*(2*(1/x[:, 0]))))-(self.c2*(x[:, 0]**4*(4*(1/x[:, 0])))))+(((x[:, 0]**6*(6*(1/x[:, 0])))*self.c3)/self.c3**2))+x[:, 1] + grad[:, 0, 1] = x[:, 0]+(x[:, 1]**2*(2*(1/x[:, 1]))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Treccani(Function): + r"""Treccani function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_T.html#go_benchmark.Treccani] + + .. math:: + f(x)=x_1^4 + c_1x_1^3 + c_2x_1^2 + x_2^2 + + + Default constant values are :math:`c = (4, 4)`. 
+
+    """
+    def __init__(self, c1=4, c2=4, name="Treccani"):
+        super().__init__(name=name)
+
+        self.c1, self.c2 = c1, c2
+        self.dim = 2
+        self.outdim = 1
+
+        self.setDimDom(domain=np.array([[-5, 5], [-5, 5]]))
+
+    def __call__(self, x):
+        return (((x[:, 0]**4+(self.c1*x[:, 0]**3))+(self.c2*x[:, 0]**2))+x[:, 1]**2).reshape(-1, 1)
+
+    def grad(self, x):
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+        grad[:, 0, 0] = ((x[:, 0]**4*(4*(1/x[:, 0])))+(self.c1*(x[:, 0]**3*(3*(1/x[:, 0])))))+(self.c2*(x[:, 0]**2*(2*(1/x[:, 0]))))
+        grad[:, 0, 1] = x[:, 1]**2*(2*(1/x[:, 1]))
+
+        return grad
+
+################################################################################
+################################################################################
+################################################################################
+
+class Trefethen(Function):
+    r"""Trefethen function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_T.html#go_benchmark.Trefethen]
+
+    .. math::
+        f(x)=0.25 x_{1}^{2} + 0.25 x_{2}^{2} + e^{\sin\left(50 x_{1}\right)} - \sin\left(10 x_{1} + 10 x_{2}\right) + \sin\left(60 e^{x_{2}}\right) + \sin\left[70 \sin\left(x_{1}\right)\right] + \sin\left[\sin\left(80 x_{2}\right)\right]
+
+    """
+    def __init__(self, name="Trefethen"):
+        super().__init__(name=name)
+
+
+        self.dim = 2
+        self.outdim = 1
+
+        self.setDimDom(domain=np.array([[-10, 10], [-10, 10]]))
+
+    def __call__(self, x):
+        # includes the 0.25*(x1^2 + x2^2) term of the formula above
+        return ((0.25*x[:, 0]**2+0.25*x[:, 1]**2)+(((np.exp(np.sin(50*x[:, 0]))-np.sin((10*x[:, 0])+(10*x[:, 1])))+np.sin(60*np.exp(x[:, 1])))+np.sin(70*np.sin(x[:, 0])))+np.sin(np.sin(80*x[:, 1]))).reshape(-1, 1)
+
+    def grad(self, x):
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+        grad[:, 0, 0] = (0.5*x[:, 0])+((np.exp(np.sin(50*x[:, 0]))*(np.cos(50*x[:, 0])*50))-(np.cos((10*x[:, 0])+(10*x[:, 1]))*10))+(np.cos(70*np.sin(x[:, 0]))*(70*np.cos(x[:, 0])))
+        grad[:, 0, 1] = (0.5*x[:, 1])+((-(np.cos((10*x[:, 0])+(10*x[:, 1]))*10))+(np.cos(60*np.exp(x[:, 1]))*(60*np.exp(x[:, 1]))))+(np.cos(np.sin(80*x[:, 1]))*(np.cos(80*x[:, 1])*80))
+
+        return grad
+
+################################################################################
+################################################################################
+################################################################################
+
+class Ursem01(Function):
+    r"""Ursem01 function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_U.html#go_benchmark.Ursem01]
+
+    .. math::
+        f(x)=- \sin(c_1x_1 - c_2 \pi) - c_3 \cos(x_2) - c_4x_1
+
+
+    Default constant values are :math:`c = (2, 0.5, 3, 0.5)`.
+ + """ + def __init__(self, c1=2, c2=0.5, c3=3, c4=0.5, name="Ursem01"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-2.5, 3], [-2, 2]])) + + def __call__(self, x): + return (((-np.sin((self.c1*x[:, 0])-(self.c2*np.pi)))-(self.c3*np.cos(x[:, 1])))-(self.c4*x[:, 0])).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (-(np.cos((self.c1*x[:, 0])-(self.c2*np.pi))*self.c1))-self.c4 + grad[:, 0, 1] = -(self.c3*(-np.sin(x[:, 1]))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Ursem03(Function): + r"""Ursem03 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_U.html#go_benchmark.Ursem03] + + .. math:: + f(x)=- \sin(c_1 \pi x_1 + c_2 \pi) \frac{c_3 - |x_1|}{c_4} \frac{c_5 - |x_1|}{c_6} - \sin(c_7 \pi x_2 + c_8 \pi) \frac{c_9 - |x_2|}{c_{10}} \frac{c_{11} - |x_2|}{c_{12}} + + + Default constant values are :math:`c = (2.2, 0.5, 2, 2, 3, 2, 2.2, 0.5, 2, 2, 3, 2)`. + + """ + def __init__(self, c1=2.2, c2=0.5, c3=2, c4=2, c5=3, c6=2, c7=2.2, c8=0.5, c9=2, c10=2, c11=3, c12=2, name="Ursem03"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8, self.c9, self.c10, self.c11, self.c12 = c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-2, 2], [-1.5, 1.5]])) + + def __call__(self, x): + return ((((-np.sin(((self.c1*np.pi)*x[:, 0])+(self.c2*np.pi)))*((self.c3-np.abs(x[:, 0]))/self.c4))*((self.c5-np.abs(x[:, 0]))/self.c6))-((np.sin(((self.c7*np.pi)*x[:, 1])+(self.c8*np.pi))*((self.c9-np.abs(x[:, 1]))/self.c10))*((self.c11-np.abs(x[:, 1]))/self.c12))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((((-(np.cos(((self.c1*np.pi)*x[:, 0])+(self.c2*np.pi))*(self.c1*np.pi)))*((self.c3-np.abs(x[:, 0]))/self.c4))+((-np.sin(((self.c1*np.pi)*x[:, 0])+(self.c2*np.pi)))*(((-np.sign(x[:, 0]))*self.c4)/self.c4**2)))*((self.c5-np.abs(x[:, 0]))/self.c6))+(((-np.sin(((self.c1*np.pi)*x[:, 0])+(self.c2*np.pi)))*((self.c3-np.abs(x[:, 0]))/self.c4))*(((-np.sign(x[:, 0]))*self.c6)/self.c6**2)) + grad[:, 0, 1] = -(((((np.cos(((self.c7*np.pi)*x[:, 1])+(self.c8*np.pi))*(self.c7*np.pi))*((self.c9-np.abs(x[:, 1]))/self.c10))+(np.sin(((self.c7*np.pi)*x[:, 1])+(self.c8*np.pi))*(((-np.sign(x[:, 1]))*self.c10)/self.c10**2)))*((self.c11-np.abs(x[:, 1]))/self.c12))+((np.sin(((self.c7*np.pi)*x[:, 1])+(self.c8*np.pi))*((self.c9-np.abs(x[:, 1]))/self.c10))*(((-np.sign(x[:, 1]))*self.c12)/self.c12**2))) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class Ursem04(Function): + r""" + Ursem04 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_U.html#go_benchmark.Ursem04] + + A multimodal minimization function + + .. math:: + f(x)=-c_1 \sin(c_2 \pi x_1 + c_3 \pi) \frac{c_4 - \sqrt{x_1^2 + x_2^2}}{c_5} + + Default constant values are :math:`c = (3.0, 0.5, 0.5, 2.0, 4.0)`. 
+ """ + def __init__(self, c1=3, c2=0.5, c3=0.5, c4=2, c5=4, name="Ursem04"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5 = c1, c2, c3, c4, c5 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-2, 2], [-2, 2]])) + + def __call__(self, x): + return (-self.c1*np.sin(self.c2*np.pi*x[:, 0]+self.c3*np.pi)*((self.c4-np.sqrt(x[:, 0]**2+x[:, 1]**2))/self.c5)).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = -self.c1*np.cos(self.c2*np.pi*x[:, 0]+self.c3*np.pi)*self.c2*np.pi*(self.c4-np.sqrt(x[:, 0]**2+x[:, 1]**2))/self.c5 + self.c1*np.sin(self.c2*np.pi*x[:, 0]+self.c3*np.pi)*x[:,0] / (self.c5*np.sqrt(x[:, 0]**2+x[:, 1]**2)) + grad[:, 0, 1] = self.c1*np.sin(self.c2*np.pi*x[:, 0]+self.c3*np.pi)*x[:,1] / (self.c5*np.sqrt(x[:, 0]**2+x[:, 1]**2)) + + return grad + + +################################################################################ +################################################################################ +################################################################################ + +class UrsemWaves(Function): + r"""UrsemWaves function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_U.html#go_benchmark.UrsemWaves] + + .. math:: + f(x)=-c_1x_1^2 + (x_2^2 - c_2x_2^2)x_1x_2 + c_3 \cos \left[ c_4x_1 - x_2^2(c_5 + x_1) \right ] \sin(c_6 \pi x_1) + + + Default constant values are :math:`c = (0.9, 4.5, 4.7, 2, 2, 2.5)`. + + """ + def __init__(self, c1=0.9, c2=4.5, c3=4.7, c4=2, c5=2, c6=2.5, name="UrsemWaves"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6 = c1, c2, c3, c4, c5, c6 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-0.9, 1.2], [-1.2, 1.2]])) + + def __call__(self, x): + return ((((-self.c1)*x[:, 0]**2)+(((x[:, 1]**2-(self.c2*x[:, 1]**2))*x[:, 0])*x[:, 1]))+((self.c3*np.cos((self.c4*x[:, 0])-(x[:, 1]**2*(self.c5+x[:, 0]))))*np.sin((self.c6*np.pi)*x[:, 0]))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((-self.c1)*(x[:, 0]**2*(2*(1/x[:, 0]))))+((x[:, 1]**2-(self.c2*x[:, 1]**2))*x[:, 1]))+(((self.c3*((-np.sin((self.c4*x[:, 0])-(x[:, 1]**2*(self.c5+x[:, 0]))))*(self.c4-x[:, 1]**2)))*np.sin((self.c6*np.pi)*x[:, 0]))+((self.c3*np.cos((self.c4*x[:, 0])-(x[:, 1]**2*(self.c5+x[:, 0]))))*(np.cos((self.c6*np.pi)*x[:, 0])*(self.c6*np.pi)))) + grad[:, 0, 1] = (((((x[:, 1]**2*(2*(1/x[:, 1])))-(self.c2*(x[:, 1]**2*(2*(1/x[:, 1])))))*x[:, 0])*x[:, 1])+((x[:, 1]**2-(self.c2*x[:, 1]**2))*x[:, 0]))+((self.c3*((-np.sin((self.c4*x[:, 0])-(x[:, 1]**2*(self.c5+x[:, 0]))))*(-((x[:, 1]**2*(2*(1/x[:, 1])))*(self.c5+x[:, 0])))))*np.sin((self.c6*np.pi)*x[:, 0])) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class VenterSobiezcczanskiSobieski(Function): + r"""VenterSobiezcczanskiSobieski function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_V.html#go_benchmark.VenterSobiezcczanskiSobieski] + + .. math:: + f(x)=x_1^2 - c_1 \cos^2(x_1) - c_2 \cos(x_1^2/c_3) + x_2^2 - c_4 \cos^2(x_2) - c_5 \cos(x_2^2/c_6) + + + Default constant values are :math:`c = (100, 100, 30, 100, 100, 30)`. 
+ + """ + def __init__(self, c1=100, c2=100, c3=30, c4=100, c5=100, c6=30, name="VenterSobiezcczanskiSobieski"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6 = c1, c2, c3, c4, c5, c6 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-50, 50], [-50, 50]])) + + def __call__(self, x): + return (((((x[:, 0]**2-(self.c1*np.cos(x[:, 0])**2))-(self.c2*np.cos(x[:, 0]**2/self.c3)))+x[:, 1]**2)-(self.c4*np.cos(x[:, 1])**2))-(self.c5*np.cos(x[:, 1]**2/self.c6))).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((x[:, 0]**2*(2*(1/x[:, 0])))-(self.c1*(np.cos(x[:, 0])**2*(2*((1/np.cos(x[:, 0]))*(-np.sin(x[:, 0])))))))-(self.c2*((-np.sin(x[:, 0]**2/self.c3))*(((x[:, 0]**2*(2*(1/x[:, 0])))*self.c3)/self.c3**2))) + grad[:, 0, 1] = ((x[:, 1]**2*(2*(1/x[:, 1])))-(self.c4*(np.cos(x[:, 1])**2*(2*((1/np.cos(x[:, 1]))*(-np.sin(x[:, 1])))))))-(self.c5*((-np.sin(x[:, 1]**2/self.c6))*(((x[:, 1]**2*(2*(1/x[:, 1])))*self.c6)/self.c6**2))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class WayburnSeader01(Function): + r"""WayburnSeader01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_W.html#go_benchmark.WayburnSeader01] + + .. math:: + f(x)=(x_1^6 + x_2^4 - c_1)^2 + (c_2x_1 + x_2 - c_3)^2 + + + Default constant values are :math:`c = (17, 2, 4)`. + + """ + def __init__(self, c1=17, c2=2, c3=4, name="WayburnSeader01"): + super().__init__(name=name) + + self.c1, self.c2, self.c3 = c1, c2, c3 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-5, 5], [-5, 5]])) + + def __call__(self, x): + return (((x[:, 0]**6+x[:, 1]**4)-self.c1)**2+(((self.c2*x[:, 0])+x[:, 1])-self.c3)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = (((x[:, 0]**6+x[:, 1]**4)-self.c1)**2*(2*((1/((x[:, 0]**6+x[:, 1]**4)-self.c1))*(x[:, 0]**6*(6*(1/x[:, 0]))))))+((((self.c2*x[:, 0])+x[:, 1])-self.c3)**2*(2*((1/(((self.c2*x[:, 0])+x[:, 1])-self.c3))*self.c2))) + grad[:, 0, 1] = (((x[:, 0]**6+x[:, 1]**4)-self.c1)**2*(2*((1/((x[:, 0]**6+x[:, 1]**4)-self.c1))*(x[:, 1]**4*(4*(1/x[:, 1]))))))+((((self.c2*x[:, 0])+x[:, 1])-self.c3)**2*(2*(1/(((self.c2*x[:, 0])+x[:, 1])-self.c3)))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class WayburnSeader02(Function): + r"""WayburnSeader02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_W.html#go_benchmark.WayburnSeader02] + + .. math:: + f(x)=\left[ c_1 - c_2(x_1 - c_3)^2 - c_4(x_2 - c_5)^2 \right]^2 + (x_2 - c_6)^2 + + + Default constant values are :math:`c = (1.613, 4, 0.3125, 4, 1.625, 1)`. 
+ + """ + def __init__(self, c1=1.613, c2=4, c3=0.3125, c4=4, c5=1.625, c6=1, name="WayburnSeader02"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6 = c1, c2, c3, c4, c5, c6 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-500, 500], [-500, 500]])) + + def __call__(self, x): + return (((self.c1-(self.c2*(x[:, 0]-self.c3)**2))-(self.c4*(x[:, 1]-self.c5)**2))**2+(x[:, 1]-self.c6)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = ((self.c1-(self.c2*(x[:, 0]-self.c3)**2))-(self.c4*(x[:, 1]-self.c5)**2))**2*(2*((1/((self.c1-(self.c2*(x[:, 0]-self.c3)**2))-(self.c4*(x[:, 1]-self.c5)**2)))*(-(self.c2*((x[:, 0]-self.c3)**2*(2*(1/(x[:, 0]-self.c3)))))))) + grad[:, 0, 1] = (((self.c1-(self.c2*(x[:, 0]-self.c3)**2))-(self.c4*(x[:, 1]-self.c5)**2))**2*(2*((1/((self.c1-(self.c2*(x[:, 0]-self.c3)**2))-(self.c4*(x[:, 1]-self.c5)**2)))*(-(self.c4*((x[:, 1]-self.c5)**2*(2*(1/(x[:, 1]-self.c5)))))))))+((x[:, 1]-self.c6)**2*(2*(1/(x[:, 1]-self.c6)))) + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + +class Zettl(Function): + r"""Zettl function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_Z.html#go_benchmark.Zettl] + + .. math:: + f(x)=c_1 x_{1} + \left(x_{1}^{2} - c_2 x_{1} + x_{2}^{2}\right)^{2} + + + Default constant values are :math:`c = (0.25, 2)`. + + """ + def __init__(self, c1=0.25, c2=2, name="Zettl"): + super().__init__(name=name) + + self.c1, self.c2 = c1, c2 + self.dim = 2 + self.outdim = 1 + + self.setDimDom(domain=np.array([[-1, 5], [-1, 5]])) + + def __call__(self, x): + return ((self.c1*x[:, 0])+((x[:, 0]**2-(self.c2*x[:, 0]))+x[:, 1]**2)**2).reshape(-1, 1) + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + grad[:, 0, 0] = self.c1+(((x[:, 0]**2-(self.c2*x[:, 0]))+x[:, 1]**2)**2*(2*((1/((x[:, 0]**2-(self.c2*x[:, 0]))+x[:, 1]**2))*((x[:, 0]**2*(2*(1/x[:, 0])))-self.c2)))) + grad[:, 0, 1] = ((x[:, 0]**2-(self.c2*x[:, 0]))+x[:, 1]**2)**2*(2*((1/((x[:, 0]**2-(self.c2*x[:, 0]))+x[:, 1]**2))*(x[:, 1]**2*(2*(1/x[:, 1]))))) + + return grad + +################################################################################ +################################################################################ +################################################################################ + +class Zirilli(Function): + r"""Zirilli function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_Z.html#go_benchmark.Zirilli] + + .. math:: + f(x)=c_1x_1^4 - c_2x_1^2 + c_3x_1 + c_4x_2^2 + + + Default constant values are :math:`c = (0.25, 0.5, 0.1, 0.5)`. 
+
+    """
+    def __init__(self, c1=0.25, c2=0.5, c3=0.1, c4=0.5, name="Zirilli"):
+        super().__init__(name=name)
+
+        self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4
+        self.dim = 2
+        self.outdim = 1
+
+        self.setDimDom(domain=np.array([[-10, 10], [-10, 10]]))
+
+    def __call__(self, x):
+        return ((((self.c1*x[:, 0]**4)-(self.c2*x[:, 0]**2))+(self.c3*x[:, 0]))+(self.c4*x[:, 1]**2)).reshape(-1, 1)
+
+    def grad(self, x):
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+        grad[:, 0, 0] = ((self.c1*(x[:, 0]**4*(4*(1/x[:, 0]))))-(self.c2*(x[:, 0]**2*(2*(1/x[:, 0])))))+self.c3
+        grad[:, 0, 1] = self.c4*(x[:, 1]**2*(2*(1/x[:, 1])))
+
+        return grad
+
+# https://www.sfu.ca/~ssurjano/optimization.html, many local minima section,
+# excluding discontinuous functions
+
+################################################################################
+################################################################################
+################################################################################
+
+class DropWave(Function):
+    r"""DropWave function
+
+    .. math::
+        f(x)=-\frac{c_1+\cos(c_2\sqrt{x_1^2+x_2^2})}{c_3(x_1^2+x_2^2)+c_4}
+
+
+    Default constant values are :math:`c = (1., 12., 0.5, 2.)`.
+
+    """
+    def __init__(self, c1=1., c2=12., c3=0.5, c4=2., name="DropWave"):
+        super().__init__(name=name)
+
+        self.c1, self.c2, self.c3, self.c4 = c1, c2, c3, c4
+        self.dim = 2
+        self.outdim = 1
+
+        self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-5.12, 5.12]))
+
+    def __call__(self, x):
+        self._numerator = (self.c1 + np.cos(self.c2 * np.sqrt(x[:, 0] ** 2 + x[:, 1] ** 2)))[:, np.newaxis]
+        self._denominator = (self.c3 * (x[:, 0] ** 2 + x[:, 1] ** 2) + self.c4)[:, np.newaxis]
+        return -self._numerator / self._denominator
+
+    def grad(self, x):
+        _ = self.__call__(x)
+        x1, x2 = x[:, 0], x[:, 1]
+        dist_sq = (x1 ** 2 + x2 ** 2)[:, np.newaxis]
+        num_grad = -np.sin(self.c2 * np.sqrt(dist_sq)) * self.c2 * x / np.sqrt(dist_sq)
+        denom_grad = 2 * self.c3 * x
+
+        return (-(num_grad * self._denominator - self._numerator * denom_grad) / (self._denominator ** 2))[:, np.newaxis, :]
diff --git a/src/pytuq/func/benchNd.py b/src/pytuq/func/benchNd.py
new file mode 100755
index 0000000..9fbfb4d
--- /dev/null
+++ b/src/pytuq/func/benchNd.py
@@ -0,0 +1,529 @@
+#!/usr/bin/env python
+"""
+Nd benchmark functions module.
+
+Most of the functions are taken from https://github.com/Vahanosi4ek/pytuq_funcs, which auto-generates the code from the functions' LaTeX strings.
+"""
+import sys
+import numpy as np
+
+from scipy.special import factorial
+from scipy.stats import multivariate_normal
+
+from .func import Function
+
+
+################################################################################
+################################################################################
+################################################################################
+
+class Ackley(Function):
+    r"""
+    Ackley function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Ackley]
+
+    Complex cosine function with many local minima
+
+    .. math::
+        f(x)=-c_1e^{-c_2\sqrt{\frac{1}{d}\sum_{i=1}^{d}x_i^2}}-e^{\frac{1}{d}\sum_{i=1}^{d}\cos(c_3x_i)}+c_4+e
+
+
+    Default constant values are :math:`c = (20., 0.2, 2\pi, 20.)` and :math:`d = 2`.
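+
+    A minimal usage sketch of the N-d interface (assuming the installed module
+    path is ``pytuq.func.benchNd``)::
+
+        import numpy as np
+        from pytuq.func.benchNd import Ackley
+
+        fcn = Ackley(d=5)               # 5-dimensional Ackley
+        x = np.full((3, 5), 0.5)        # 3 samples in 5 dimensions
+        y = fcn(x)       # values, shape (3, 1)
+        g = fcn.grad(x)  # gradients, shape (3, 1, 5)
+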
+ """ + def __init__(self, c1=20., c2=0.2, c3=2*np.pi, c4=20., d=2, name="Ackley"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.d = c1, c2, c3, c4, d + self.dim = d + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-32, 32])) + + def __call__(self, x): + t1 = np.exp(-self.c2 * np.sqrt(np.sum(x ** 2, axis=1, keepdims=True) / self.d)) + t2 = np.exp(np.sum(np.cos(self.c3 * x), axis=1, keepdims=True) / self.d) + return -self.c1 * t1 - t2 + self.c4 + np.exp(1.) + + def grad(self, x): + t1 = -np.exp(-self.c2 * np.sqrt(np.sum(x ** 2, axis=1, keepdims=True) / self.d)) * self.c2 * x / (self.d * np.sqrt(np.sum(x ** 2, axis=1, keepdims=True) / self.d)) + t2 = -np.exp(np.sum(np.cos(self.c3 * x), axis=1, keepdims=True) / self.d) * self.c3 * np.sin(self.c3 * x) / self.d + + return (-self.c1 * t1 - t2)[:, np.newaxis, :] + + +################################################################################ +################################################################################ +################################################################################ + +class Alpine01(Function): + r""" + Alpine01 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Alpine01] + + A N-d multimodal function + + .. math:: + f(x)=\sum_{i=1}^n |x_i\sin(x_i)+c_1x_2| + + + Default constant values are :math:`c_1 = 0.1`. + """ + def __init__(self, c1=0.1, d=2, name="Alpine01"): + super().__init__(name=name) + + self.c1 = c1 + self.dim = d + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-10., 10.])) + + def __call__(self, x): + return np.sum(np.abs(x * np.sin(x) + self.c1 * x), axis=1, keepdims=True) + + def grad(self, x): + return (np.sign(x * np.sin(x) + self.c1 * x) * (x * np.cos(x) + np.sin(x) + self.c1))[:, np.newaxis, :] + + +################################################################################ +################################################################################ +################################################################################ + +class Alpine02(Function): + r""" + Alpine02 function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.Alpine02] + + A N-d multimodal function + + .. math:: + f(x)=\prod_{i=1}^{n}\sqrt{x_i}\sin(x_i) + """ + def __init__(self, d=2, name="Alpine02"): + super().__init__(name=name) + + self.dim = d + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 10.])) + + def __call__(self, x): + return np.prod(np.sqrt(x) * np.sin(x), axis=1, keepdims=True) + + def grad(self, x): + inner = np.sqrt(x) * np.sin(x) + inner_grad = np.sqrt(x) * np.cos(x) + 1 / 2 * x ** (-1 / 2) * np.sin(x) + return (self.__call__(x) / inner * inner_grad)[:, np.newaxis, :] + + +################################################################################ +################################################################################ +################################################################################ + +class AMGM(Function): + r""" + AMGM function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_A.html#go_benchmark.AMGM] + + Difference-squared between AM and GM + + .. 
math:: + f(x)=\left ( \frac{1}{n} \sum_{i=1}^{n} x_i - \sqrt[n]{ \prod_{i=1}^{n} x_i} \right )^2 + """ + def __init__(self, d=2, name="AMGM"): + super().__init__(name=name) + + self.dim = d + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([0., 10.])) + + def __call__(self, x): + self._am = 1 / self.dim * np.sum(x, axis=1, keepdims=True) + self._gm = np.power(np.prod(x, axis=1, keepdims=True), 1 / self.dim) + return (self._am - self._gm) ** 2 + + def grad(self, x): + _ = self.__call__(x) + return (2 * (self._am - self._gm) * (1 / self.dim - 1 / self.dim * np.power((np.prod(x, axis=1, keepdims=True)), -1 / self.dim) * np.prod(x, axis=1, keepdims=True) / x))[:, np.newaxis, :] + + + +################################################################################ +################################################################################ +################################################################################ + +class Bohachevsky(Function): + r""" + Bohachevsky function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_B.html#go_benchmark.Bohachevsky] + + A N-d multimodal function + + .. math:: + f(x)=\sum_{i=1}^{n-1}\left[x_i^2 + c_1x_{i+1}^2 - c_2\cos(c_3\pi x_i) - c_4\cos(c_5\pi x_{i+1}) + c_6\right] + + + Default constant values are :math:`c = (2., 0.3, 3., 0.4, 4., 0.7)`. + """ + def __init__(self, c1=2., c2=0.3, c3=3., c4=0.4, c5=4., c6=0.7, d=2, name="Bohachevsky"): + super().__init__(name=name) + + self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.d = c1, c2, c3, c4, c5, c6, d + self.dim = d + self.outdim = 1 + + self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-15., 15.])) + + def __call__(self, x): + x_shr = np.concatenate((x[:, 1:], x[:, :1]), axis=1) + inner = x ** 2 + self.c1 * x_shr ** 2 - self.c2 * np.cos(self.c3 * np.pi * x) - self.c4 * np.cos(self.c5 * np.pi * x_shr) + self.c6 + return np.sum(inner[:, :-1], axis=1, keepdims=True) + + def grad(self, x): + # Uses a trick where the first and last column cancel where we need them to. + x1 = np.concatenate((x[:, :-1], np.zeros((x.shape[0], 1))), axis=1) + x2 = np.concatenate((np.zeros((x.shape[0], 1)), x[:, 1:]), axis=1) + return (2 * x1 + self.c2 * self.c3 * np.pi * np.sin(self.c3 * np.pi * x1) + self.c1 * 2 * x2 + self.c4 * self.c5 * np.pi * np.sin(self.c5 * np.pi * x2))[:, np.newaxis, :] + + + + +################################################################################ +################################################################################ +################################################################################ + +class Cigar(Function): + r""" + Cigar function + + Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.Cigar] + + A N-d multimodal function + + .. 
+class Cigar(Function):
+    r"""
+    Cigar function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.Cigar]
+
+    An N-d unimodal function
+
+    .. math::
+        f(x)=x_1^2 + c_1 \sum_{i=2}^{n} x_i^2
+
+
+    Default constant value is :math:`c_1 = 10^3`.
+    """
+    def __init__(self, c1=10 ** 3, d=4, name="Cigar"):
+        super().__init__(name=name)
+
+        self.c1, self.d = c1, d
+        self.dim = d
+        self.outdim = 1
+
+        self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-100., 100.]))
+
+    def __call__(self, x):
+        return (x[:, 0].reshape(-1, 1)**2+self.c1 * np.sum(x[:, 1:]**2, axis=1, keepdims=True))
+
+    def grad(self, x):
+        x_modified = np.concatenate((x[:, 0][:, np.newaxis], self.c1 * x[:, 1:]), axis=1)
+        grad = 2 * x_modified
+
+        return grad[:, np.newaxis, :]
+
+
+################################################################################
+################################################################################
+################################################################################
+
+class CosineMixture(Function):
+    r"""
+    CosineMixture function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_C.html#go_benchmark.CosineMixture]
+
+    An N-d multimodal function
+
+    .. math::
+        f(x)=-c_1 \sum_{i=1}^n \cos(c_2 \pi x_i) - \sum_{i=1}^n x_i^2
+
+
+    Default constant values are :math:`c = (0.1, 5.0)`.
+    """
+    def __init__(self, c1=0.1, c2=5.0, d=2, name="CosineMixture"):
+        super().__init__(name=name)
+
+        self.c1, self.c2 = c1, c2
+        self.dim = d
+        self.outdim = 1
+
+        self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-1., 1.]))
+
+    def __call__(self, x):
+        return (-self.c1*np.sum(np.cos(self.c2*np.pi*x), axis=1)-np.sum(x**2.0, axis=1)).reshape(-1, 1)
+
+    def grad(self, x):
+        grad = self.c1*self.c2*np.pi*np.sin(self.c2*np.pi*x)-2*x
+
+        return grad[:, np.newaxis, :]
+
+
+################################################################################
+################################################################################
+################################################################################
+
+class Griewank(Function):
+    r"""
+    Griewank function
+
+    Reference: [https://www.sfu.ca/~ssurjano/griewank.html]
+
+    .. math::
+        f(x)=\sum_{i=1}^{d}\frac{x_i^2}{c_1}-\prod_{i=1}^{d}\cos(\frac{x_i}{\sqrt{i}}) + c_2
+
+
+    Default constant values are :math:`c = (4000., 1.)` and :math:`d = 2`.
+    """
+    def __init__(self, c1=4000., c2=1., d=2, name="Griewank"):
+        super().__init__(name=name)
+
+        self.c1, self.c2 = c1, c2
+        self.dim = d
+        self.outdim = 1
+
+        self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-600., 600.]))
+
+    def __call__(self, x):
+        self._term1 = np.sum(x ** 2 / self.c1, axis=1, keepdims=True)
+        self._term2 = 1.
+        for i in range(1, self.dim + 1):
+            self._term2 *= np.cos(x[:, i - 1] / np.sqrt(i))
+
+        return self._term1 - self._term2[:, np.newaxis] + self.c2
+
+    def grad(self, x):
+        _ = self.__call__(x)
+        term1_grad = 2 * x / self.c1
+        term2_grad = np.zeros((x.shape[0], self.dim))
+        for i in range(1, self.dim + 1):
+            term2_grad[:, i - 1] = -self._term2 / np.cos(x[:, i - 1] / np.sqrt(i)) * np.sin(x[:, i - 1] / np.sqrt(i)) / np.sqrt(i)
+
+        return (term1_grad - term2_grad)[:, np.newaxis, :]
+
+
+################################################################################
+################################################################################
+################################################################################
+
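The `__call__` implementations are vectorized over samples, so a dense grid evaluation (e.g., for a contour plot of the 2-d Griewank defined above) needs no explicit loop. A brief sketch, again assuming the `(dim, 2)` `domain` attribute set by `setDimDom`:

```python
import numpy as np
import matplotlib.pyplot as plt
from pytuq.func.benchNd import Griewank

fcn = Griewank(d=2)
lo, hi = fcn.domain[:, 0], fcn.domain[:, 1]

g1, g2 = np.meshgrid(np.linspace(lo[0], hi[0], 201), np.linspace(lo[1], hi[1], 201))
xy = np.column_stack([g1.ravel(), g2.ravel()])   # (201*201, 2) sample array
zz = fcn(xy).reshape(g1.shape)                   # (201*201, 1) -> (201, 201)

plt.contourf(g1, g2, zz, levels=30)
plt.colorbar()
plt.savefig('griewank_2d.png')
```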
+class Mishra07(Function):
+    r"""
+    Mishra07 function
+
+    Reference: [https://infinity77.net/global_optimization/test_functions_nd_M.html#go_benchmark.Mishra07]
+
+    A multimodal minimization function
+
+    .. math::
+        f(x)=\left(\prod_{i=1}^d x_i-d!\right)^2
+    """
+    def __init__(self, d=2, name="Mishra07"):
+        super().__init__(name=name)
+
+        self.dim = d
+        self.outdim = 1
+
+        self.setDimDom(domain=np.ones((self.dim, 1)) * np.array([-1., 1.]))
+
+    def __call__(self, x):
+        return ((np.prod(x,axis=1)-factorial(self.dim))**2).reshape(-1,1)
+
+    def grad(self, x):
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+        for i in range(self.dim):
+            grad[:, 0, i] = 2.0 * (np.prod(x,axis=1)-factorial(self.dim)) * np.prod(x,axis=1)/x[:, i]
+
+        return grad
+
+################################################################################
+################################################################################
+################################################################################
+
+class MVN(Function):
+    r"""Multivariate Normal function.
+
+    Reference: [https://en.wikipedia.org/wiki/Multivariate_normal_distribution]
+
+    .. math::
+        f(x) = \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right)
+    """
+    def __init__(self, mean=[0., 0.], cov=[[1., 0.], [0., 1.]], name='MVN'):
+        super().__init__()
+
+        self.mean = np.array(mean, dtype=float)
+        self.cov = np.array(cov, dtype=float)
+
+        f = 4.0
+        domain = np.tile(self.mean.reshape(-1,1), (1,2))
+        stds = np.sqrt(np.diag(self.cov))
+        domain[:,0] -= f * stds
+        domain[:,1] += f * stds
+
+        self.setDimDom(domain=domain)
+        self.name = name
+
+        self.outdim = 1
+
+        return
+
+    def __call__(self, x):
+        self.checkDim(x)
+
+        yy = multivariate_normal.pdf(x, mean=self.mean, cov=self.cov)
+
+        return yy.reshape(-1,1)
+
+
+    def grad(self, x):
+
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+
+        invcov = np.linalg.inv(self.cov)
+
+        mvn_vals = self.__call__(x)[:,0]
+
+        for j in range(self.dim):
+            grad[:, 0, j] = - mvn_vals * np.dot(invcov[j, :], (x - self.mean).T)
+
+        return grad
+
+
+################################################################################
+################################################################################
+################################################################################
+
+
+class NegAlpineN2(Function):
+    r"""Negative Alpine function.
+
+    Reference: [http://benchmarkfcns.xyz/benchmarkfcns/alpinen2fcn.html]
+
+    .. math::
+        f(x) = - \sqrt{\prod_{i=1}^{d} x_i} \prod_{i=1}^{d} \sin(x_i)
+    """
+    def __init__(self, name='Alpine N2', dim=2):
+        super().__init__()
+        self.setDimDom(domain=np.array(np.tile([0.001, 10.], (dim, 1))))
+        self.name = name
+        self.outdim = 1
+
+        return
+
+    def __call__(self, x):
+
+        self.checkDim(x)
+
+        yy = - np.sqrt(np.prod(x, axis=1)) * np.prod(np.sin(x), axis=1)
+
+        return yy.reshape(-1, 1)
+
+
+    def grad(self, x):
+
+        grad = np.zeros((x.shape[0], self.outdim, self.dim))
+
+        for j in range(self.dim):
+            grad[:, 0, j] = (0.5 / x[:, j] + 1./np.tan(x[:, j])) * self.__call__(x)[:,0]
+
+        return grad
+
+################################################################################
+################################################################################
+################################################################################
+
+class Sobol(Function):
+    r"""Sobol function
+
+    Reference: [https://www.sfu.ca/~ssurjano/gfunc.html]
+
+    .. math::
+        f(x) = \prod_{i=1}^{d} \frac{|4x_i -2| + a_i}{1 + a_i}
+    """
+    def __init__(self, name='Sobol', dim=5):
+        super().__init__()
+        self.name = name
+        self.dim = dim
+        self.outdim = 1
+
+        self.setDimDom(domain = np.ones((self.dim, 1)) * np.array([0., 1.]))
+        self.a = np.array([(i-2.)/2.
for i in range(1,self.dim+1)]) + + + def __call__(self, x): + sam = x.shape[0] + self.checkDim(x) + + ydata=np.empty((sam, self.outdim)) + for j in range(sam): + val=1. + for k in range(self.dim): + val *= ( (abs(4.*x[j,k]-2.)+self.a[k])/(1.+self.a[k]) ) + ydata[j,0]=val + + return ydata + + def grad(self, x): + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + for i in range(self.dim): + partial = 1. / (1. + self.a[i]) * 4. * np.sign(4. * x[:, i] - 2.) + for j in range(self.dim): + if j != i: + partial *= ( (abs(4.*x[:,j]-2.)+self.a[j])/(1.+self.a[j]) ) + grad[:, 0, i] = partial + + return grad + + + +################################################################################ +################################################################################ +################################################################################ + + +class SumSquares(Function): + r"""SumSquares function. + + .. math:: + f(x) = \sum_{i=1}^{d} (1 + i) x_i^2 + + """ + def __init__(self, name='SumSquares', dim=5): + super().__init__() + self.setDimDom(domain=np.array(np.tile([-10.0, 10.0], (dim, 1)))) + self.name = name + self.outdim = 1 + + return + + def __call__(self, x): + + self.checkDim(x) + + yy = np.zeros((x.shape[0], self.outdim)) + for j in range(self.dim): + yy[:,0] += (1. + j) * x[:, j]**2 + + return yy.reshape(-1,1) + + + def grad(self, x): + + grad = np.zeros((x.shape[0], self.outdim, self.dim)) + + for j in range(self.dim): + grad[:, 0, j] = 2.0 * (1. + j) * x[:, j] + + return grad + + + diff --git a/src/pytuq/func/benchmark.py b/src/pytuq/func/benchmark.py deleted file mode 100755 index e6eb40d..0000000 --- a/src/pytuq/func/benchmark.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/env python - -import sys -import numpy as np -from scipy.stats import multivariate_normal - -from .func import Function - - -class Franke(Function): - def __init__(self, name='Franke'): - super().__init__() - self.name = name - self.dim = 2 - self.outdim = 1 - - self.setDimDom(domain = np.ones((self.dim, 1)) * np.array([0., 1.])) - - def __call__(self, x): - - - tt1 = 0.75*np.exp(-((9*x[:,0] - 2)**2 + (9*x[:,1] - 2)**2)/4.) - tt2 = 0.75*np.exp(-((9*x[:,0] + 1)**2)/49 - (9*x[:,1] + 1)/10.) - tt3 = 0.5*np.exp(-((9*x[:,0] - 7)**2 + (9*x[:,1] - 3)**2)/4.) - tt4 = -0.2*np.exp(-(9*x[:,0] - 4)**2 - (9*x[:,1] - 7)**2) - - - - return (tt1 + tt2 + tt3 + tt4).reshape(-1,self.outdim) - - def grad(self, x): - grad = np.zeros((x.shape[0], self.outdim, self.dim)) - - tt1 = 0.75*np.exp(-((9*x[:,0] - 2)**2 + (9*x[:,1] - 2)**2)/4.) - tt2 = 0.75*np.exp(-((9*x[:,0] + 1)**2)/49 - (9*x[:,1] + 1)/10.) - tt3 = 0.5*np.exp(-((9*x[:,0] - 7)**2 + (9*x[:,1] - 3)**2)/4.) - tt4 = -0.2*np.exp(-(9*x[:,0] - 4)**2 - (9*x[:,1] - 7)**2) - - grad[:,0,0] = -2*(9*x[:,0] - 2)*9/4 * tt1 - 2*(9*x[:,0] + 1)*9/49 * tt2 + \ - -2*(9*x[:,0] - 7)*9/4 * tt3 - 2*(9*x[:,0] - 4)*9 * tt4 - grad[:,0,1] = -2*(9*x[:,1] - 2)*9/4 * tt1 - 9./10. * tt2 + \ - -2*(9*x[:,1] - 3)*9/4 * tt3 - 2*(9*x[:,1] - 7)*9 * tt4 - - return grad - -class Sobol(Function): - # from https://www.sfu.ca/~ssurjano/gfunc.html - def __init__(self, name='Sobol', dim=5): - super().__init__() - self.name = name - self.dim = dim - self.outdim = 1 - - self.setDimDom(domain = np.ones((self.dim, 1)) * np.array([0., 1.])) - self.a = np.array([(i-2.)/2. for i in range(1,self.dim+1)]) - - - def __call__(self, x): - sam = x.shape[0] - self.checkDim(x) - - ydata=np.empty((sam, self.outdim)) - for j in range(sam): - val=1. 
- for k in range(self.dim): - val *= ( (abs(4.*x[j,k]-2.)+self.a[k])/(1.+self.a[k]) ) - ydata[j,0]=val - - return ydata - - - -class Ishigami(Function): - # from https://www.sfu.ca/~ssurjano/ishigami.html - def __init__(self, name='Ishigami'): - super().__init__() - self.name = name - self.dim = 3 - self.outdim = 1 - - - self.setDimDom(np.ones((self.dim, 1)) * np.array([-np.pi, np.pi])) - self.a = 7 - self.b = 0.1 - - - def __call__(self, x): - sam = x.shape[0] - self.checkDim(x) - - ydata=np.empty((sam, self.outdim)) - - for j in range(sam): - ydata[j, 0]=np.sin(x[j,0])+self.a*np.sin(x[j,1])**2+self.b*np.sin(x[j,0])*x[j,2]**4 - - return ydata - - - -class NegAlpineN2(Function): - """Negative Alpine function [http://benchmarkfcns.xyz/benchmarkfcns/alpinen2fcn.html] - - """ - def __init__(self, name='Alpine N2', dim=2): - super().__init__() - self.setDimDom(domain=np.array(np.tile([0.001, 10.], (dim, 1)))) - self.name = name - self.outdim = 1 - - return - - def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) - - yy = - np.sqrt(np.prod(x, axis=1)) * np.prod(np.sin(x), axis=1) - - return yy.reshape(-1, 1) - - - def grad(self, x): - - grad = np.zeros((x.shape[0], self.outdim, self.dim)) - - for j in range(self.dim): - grad[:, 0, j] = (0.5 / x[:, j] + 1./np.tan(x[:, j])) * self.__call__(x)[:,0] - - return grad - - -class Adjiman(Function): - """Adjiman function [http://benchmarkfcns.xyz/benchmarkfcns/adjimanfcn.html] - - """ - def __init__(self, name='Adjiman'): - super().__init__() - self.setDimDom(domain=np.array([[-1., 2.], [-1., 1.]])) - self.name = name - self.outdim = 1 - - return - - def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=2 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) - - yy = np.cos(x[:, 0]) * np.sin(x[:, 1]) - \ - x[:, 0] / (1.0 + x[:, 1] * x[:, 1]) - - return yy.reshape(-1,1) - - - def grad(self, x): - - grad = np.zeros((x.shape[0], self.outdim, self.dim)) - - grad[:, 0, 0] = - np.sin(x[:, 0]) * np.sin(x[:, 1]) - \ - 1. / (1.0 + x[:, 1] * x[:, 1]) - grad[:, 0, 1] = np.cos(x[:, 0]) * np.cos(x[:, 1]) + \ - 2. * x[:, 0] * x[:, 1] / (1.0 + x[:, 1] * x[:, 1])**2 - - return grad - - -class Branin(Function): - """Branin function [https://www.sfu.ca/~ssurjano/branin.html] - - """ - def __init__(self, name='Branin'): - super().__init__() - self.setDimDom(domain=np.array([[-5., 10.], [0., 15.]])) - self.name = name - self.outdim = 1 - - self.a_ = 1. - self.b_ = 5.1/(4*np.pi**2) - self.c_ = 5./np.pi - self.r_ = 6. - self.s_ = 10. - self.t_ = 1./(8.*np.pi) - - return - - def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=2 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) - - yy = self.a_ * (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_)**2 - yy += self.s_*(1.-self.t_)*np.cos(x[:, 0]) - yy += self.s_ - - return yy.reshape(-1,1) - - - def grad(self, x): - - grad = np.zeros((x.shape[0], self.outdim, self.dim)) - grad[:, 0, 0] = 2.0 * self.a_ * \ - (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_) * \ - (-2.0 * self.b_ * x[:, 0] + self.c_) - \ - self.s_ * (1. 
- self.t_) * np.sin(x[:, 0]) - grad[:, 0, 1] = 2.0 * self.a_ * \ - (x[:, 1] - self.b_ * x[:, 0]**2 + self.c_ * x[:, 0] - self.r_) - - - return grad - - - -class SumSquares(Function): - """SumSquares function [http://benchmarkfcns.xyz/benchmarkfcns/sumsquaresfcn.html] - - """ - def __init__(self, name='SumSquares', dim=5): - super().__init__() - self.setDimDom(domain=np.array(np.tile([-10.0, 10.0], (dim, 1)))) - self.name = name - self.outdim = 1 - - return - - def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) - - yy = np.zeros((x.shape[0], self.outdim)) - for j in range(self.dim): - yy[:,0] += (1. + j) * x[:, j]**2 - - return yy.reshape(-1,1) - - - def grad(self, x): - - grad = np.zeros((x.shape[0], self.outdim, self.dim)) - - for j in range(self.dim): - grad[:, 0, j] = 2.0 * (1. + j) * x[:, j] - - return grad - - -class Quadratic(Function): - """MVN function [REF] - - """ - def __init__(self, center, hess, name='Quadratic'): - super().__init__() - - self.center = np.array(center, dtype=float) - self.hess = np.array(hess, dtype=float) - - # TODO: use Hessian to inform the domain - domain = np.tile(self.center.reshape(-1,1), (1,2)) - domain[:,0] -= self.dmax*np.ones_like(self.center) - domain[:,1] += self.dmax*np.ones_like(self.center) - - self.setDimDom(domain=domain) - self.name = name - self.outdim = 1 - - return - - def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=1 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) - - nsam = x.shape[0] - yy = np.empty(nsam,) - for i in range(nsam): - yy[i] = 0.5 * np.dot(x[i, :]-self.center, np.dot(self.hess, x[i, :]-self.center)) - - - return yy.reshape(-1,1) - - -class MVN(Function): - """MVN function [REF] - - """ - def __init__(self, mean, cov, name='MVN'): - super().__init__() - - self.mean = np.array(mean, dtype=float) - self.cov = np.array(cov, dtype=float) - - # TODO: use cov to inform the domain - domain = np.tile(self.mean.reshape(-1,1), (1,2)) - domain[:,0] -= self.dmax*np.ones_like(self.mean) - domain[:,1] += self.dmax*np.ones_like(self.mean) - - self.setDimDom(domain=domain) - self.name = name - - self.outdim = 1 - - return - - def __call__(self, x): - self.checkDim(x) - - yy = multivariate_normal.pdf(x, mean=self.mean, cov=self.cov) - - return yy.reshape(-1,1) - - - - -class TFData(Function): - """Data generating model inspired by https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb#scrollTo=5zCEYpzu7bDX. - """ - - def __init__(self, name='tfdata'): - super().__init__() - self.name = name - - self.dim = 1 - self.outdim = 1 - - self.w0 = 0.125 - self.b0 = 5. - self.a = -20. - self.b = 60. - - self.setDimDom(domain=np.array([[self.a, self.b]])) - - return - - def __call__(self, x): - - y = (self.w0 * x * (1. + np.sin(x)) + self.b0) - - return y diff --git a/src/pytuq/func/chem.py b/src/pytuq/func/chem.py index d1c2743..c92d22f 100644 --- a/src/pytuq/func/chem.py +++ b/src/pytuq/func/chem.py @@ -1,13 +1,21 @@ #!/usr/bin/env python - +""" +Chemistry benchmark potential functions module. 
+""" import numpy as np from .func import Function class LennardJones(Function): - """Lennard Jones Potential + r"""Lennard Jones Potential + + Reference: [https://en.wikipedia.org/wiki/Lennard-Jones_potential] + .. math:: + f(r) = \epsilon \left( \left(\frac{r_0}{r}\right)^{2n} - 2 \left(\frac{r_0}{r}\right)^n \right) + + Default parameters are :math:`\epsilon = 1.0`, :math:`r_0 = 1.0`, and :math:`n=6`. """ def __init__(self, name='Lennard Jones', eps=1.0, r0=1.0, n=6): super().__init__() @@ -24,18 +32,7 @@ def __init__(self, name='Lennard Jones', eps=1.0, r0=1.0, n=6): return def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=1 dimensions - Returns - ------- - numpy array, 1dim - Vector of N values - """ self.checkDim(x) @@ -54,7 +51,12 @@ def grad(self, x): class MullerBrown(Function): - """Muller Brown Potential. + r"""Muller Brown Potential. + + Reference: [https://hunterheidenreich.com/notes/computational-chemistry/benchmark-problems/muller-brown-1979/] + + .. math:: + f(x,y) = \sum_{i=1}^{4} A_i \exp\left(a_i(x - x_{0i})^2 + b_i(x - x_{0i})(y - y_{0i}) + c_i(y - y_{0i})^2\right) """ def __init__(self, name='Muller-Brown'): @@ -74,18 +76,7 @@ def __init__(self, name='Muller-Brown'): return def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=2 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ + #: Ensure x is of the right dimensionality self.checkDim(x) diff --git a/src/pytuq/func/genz.py b/src/pytuq/func/genz.py index f1e643a..8a761b7 100644 --- a/src/pytuq/func/genz.py +++ b/src/pytuq/func/genz.py @@ -1,39 +1,19 @@ #!/usr/bin/env python - +""" + Genz function family module.""" import numpy as np from .func import Function class GenzBase(Function): - """Base class for Genz function family. - - Sets up shift, dimensional weights and domain. - - Attributes - ----------- - shifts : {number} - Shift parameter. - weights : {numpy array, 1d} - Dimensional weights. + """Base class for Genz function family + Attributes: + weights (np.ndarray): Dimensional weights. """ - def __init__(self, weights=[1.0], domain=None, name='Genz'): - """Initialization. - - Parameters - ---------- - shift : {number}, optional - Shift parameter (the default is 0.0, which means no shift) - weights : {list or numpy array, 1d}, optional - Dimensional weights (the default is [1.0], - which means 1d function with weight 1) - domain : {list or numpy array, 2d}, optional - Input domain of the function (the default is None, - which means [0,1]^d) - """ - + def __init__(self, weights=[1.0], domain=None, name='GenzBase'): super().__init__() self.weights = np.array(weights) @@ -50,7 +30,14 @@ def __init__(self, weights=[1.0], domain=None, name='Genz'): class GenzOscillatory(GenzBase): - """Genz Oscillatory function. + r"""Genz Oscillatory function + + Reference: [https://www.sfu.ca/~ssurjano/oscil.html] + + .. math:: + f(x) = \cos\left(2 \pi s + w^T x \right) + + Default values are :math:`s = 0` and :math:`w = [1.0]`. """ def __init__(self, shift=0.0, weights=[1.0], domain=None, @@ -81,7 +68,13 @@ def intgl(self): class GenzSum(GenzBase): - """Genz Oscillatory function. + r"""Genz Summation function + + .. math:: + f(x) = s + w^T x + + Default values are :math:`s = 0` and :math:`w = [1.0]`. 
+ """ def __init__(self, shift=0.0, weights=[1.0], domain=None, @@ -108,7 +101,15 @@ def grad(self, x): return grad class GenzCornerPeak(GenzBase): - """Genz Corner Peak function. + r"""Genz Corner Peak function + + Reference: [https://www.sfu.ca/~ssurjano/copeak.html] + + .. math:: + f(x) = \frac{1}{(1 + w^T x)^{d+1}} + + Default values are :math:`w = [1.0]`. + """ def __init__(self, weights=[1.0], domain=None, name='Genz Corner Peak'): diff --git a/src/pytuq/func/oper.py b/src/pytuq/func/oper.py index 7d3bd2f..ed0dc43 100644 --- a/src/pytuq/func/oper.py +++ b/src/pytuq/func/oper.py @@ -8,6 +8,22 @@ from .func import Function class CartesProdFcn(Function): + """Cartesian product of two functions. + + Computes the element-wise product of two functions evaluated on different + subspaces of the input domain. + + Args: + fcn1: First function to multiply. + fcn2: Second function to multiply. + name: Name of the composite function. Defaults to 'CartesProduct'. + + Attributes: + fcn1: The first function in the product. + fcn2: The second function in the product. + domain: Combined domain from both functions. + outdim: Output dimension (must match for both functions). + """ def __init__(self, fcn1, fcn2, name='CartesProduct'): super().__init__() self.fcn1 = fcn1 @@ -34,6 +50,21 @@ def grad(self, x): return np.concatenate((g1, g2), axis=2) class GradFcn(Function): + """Gradient extraction function. + + Extracts the gradient with respect to a specific input dimension from a function. + + Args: + fcn: The function to extract gradient from. + idim: Index of the input dimension for gradient extraction. + name: Name of the gradient function. Defaults to 'GradFcn'. + + Attributes: + fcn: The underlying function. + idim: Input dimension index for gradient extraction. + outdim: Output dimension (inherited from fcn). + domain: Domain of the function (inherited from fcn). + """ def __init__(self, fcn, idim, name='GradFcn'): super().__init__() self.fcn = fcn @@ -54,6 +85,22 @@ def __call__(self, x): class ComposeFcn(Function): + """Function composition. + + Composes two functions such that the output is fcn2(fcn1(x)). + The output dimension of fcn1 must match the input dimension of fcn2. + + Args: + fcn1: The inner function to be evaluated first. + fcn2: The outer function to be evaluated on fcn1's output. + name: Name of the composite function. Defaults to 'Composite'. + + Attributes: + fcn1: The inner function. + fcn2: The outer function. + outdim: Output dimension (inherited from fcn2). + domain: Domain of the composite function (inherited from fcn1). + """ def __init__(self, fcn1, fcn2, name='Composite'): super().__init__() self.fcn1 = fcn1 @@ -79,6 +126,24 @@ def grad(self, x): return grad class SliceFcn(Function): + """Sliced function evaluation. + + Evaluates a function on a subset of input dimensions while fixing others at + nominal values. + + Args: + fcn: The function to slice. + name: Name of the sliced function. Defaults to 'Slice'. + ind: List of input dimension indices to keep active. Defaults to [0]. + nom: Nominal values for fixed dimensions. Defaults to None. + + Attributes: + fcn: The underlying function. + ind: Active dimension indices. + nom: Nominal values for fixed dimensions. + domain: Sliced domain containing only active dimensions. + outdim: Output dimension (inherited from fcn). + """ def __init__(self, fcn, name='Slice', ind=[0], nom=None): super().__init__() self.fcn = fcn @@ -101,6 +166,24 @@ def grad(self, x): class ShiftFcn(Function): + """Shifted function. 
+ + Applies a spatial shift to the input domain of a function, evaluating + fcn(x - shift). + + Args: + fcn: The function to shift. + shift: Shift vector to subtract from input. + domain: New domain for the shifted function. If None, automatically + computed from fcn's domain. Defaults to None. + name: Name of the shifted function. Defaults to 'Shift'. + + Attributes: + fcn: The underlying function. + shift: The shift vector. + domain: Domain of the shifted function. + outdim: Output dimension (inherited from fcn). + """ def __init__(self, fcn, shift, domain=None,name='Shift'): super().__init__() assert(fcn.dim==len(shift)) @@ -120,6 +203,23 @@ def grad(self, x): return self.fcn.grad(x-self.shift) class LinTransformFcn(Function): + """Linear transformation of function output. + + Applies a linear transformation to the function output: scale * fcn(x) + shift. + + Args: + fcn: The function to transform. + scale: Scaling factor for the output. + shift: Shift/offset to add to the scaled output. + name: Name of the transformed function. Defaults to 'LinTransform'. + + Attributes: + fcn: The underlying function. + scale: Output scaling factor. + shift: Output shift/offset. + domain: Domain of the function (inherited from fcn). + outdim: Output dimension (inherited from fcn). + """ def __init__(self, fcn, scale, shift, name='LinTransform'): super().__init__() self.fcn = fcn @@ -139,8 +239,22 @@ def grad(self, x): class PickDim(Function): - """Picking dimension function [REF] - + """Dimension picking function. + + Selects a single dimension from the input vector and optionally scales it. + Returns cf * x[pdim] as a scalar output. + + Args: + dim: Total number of input dimensions. + pdim: Index of the dimension to pick. + cf: Scaling coefficient for the picked dimension. Defaults to 1.0. + name: Name of the function. Defaults to 'Dimension-Pick'. + + Attributes: + cf: Scaling coefficient. + pdim: Index of picked dimension. + dim: Total input dimensions. + outdim: Output dimension (always 1). """ def __init__(self, dim, pdim, cf=1.0, name='Dimension-Pick'): super().__init__() @@ -151,18 +265,6 @@ def __init__(self, dim, pdim, cf=1.0, name='Dimension-Pick'): self.outdim = 1 def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=1 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ self.checkDim(x) diff --git a/src/pytuq/func/poly.py b/src/pytuq/func/poly.py index 97e0b3d..c38b77d 100644 --- a/src/pytuq/func/poly.py +++ b/src/pytuq/func/poly.py @@ -1,15 +1,35 @@ #!/usr/bin/env python - +""" + Polynomial expansions module. +""" import numpy as np from .func import Function class PolyBase(Function): + r"""Base class for polynomial functions + + Attributes: + mindex (numpy array, 2d): Multi-index array for polynomial terms. + cfs (numpy array, 1d): Coefficients for polynomial terms. + nbases (int): Number of basis functions. + name (str): Name of the polynomial function. + max_deg (numpy array, 1d): Maximum degree for each dimension. + outdim (int): Output dimension (default is 1). + bases1d (list): List of 1D basis functions. + bases1d_deriv (list): List of derivatives of 1D basis functions. 
- def __init__(self, mindex, cfs, domain=None, name='Poly'): + """ + + def __init__(self, mindex=np.array([[0], [1]]), cfs=None, domain=None, name='Poly'): super().__init__() + + nbases = mindex.shape[0] + if cfs is None: + cfs = np.random.rand(nbases) + self.mindex = mindex self.cfs = cfs self.nbases = self.cfs.shape[0] @@ -91,8 +111,12 @@ def grad(self, x): return gval class Leg(PolyBase): - def __init__(self, mindex, cfs, domain=None, name='Legendre_Poly'): - super().__init__(mindex, cfs, domain=domain, name=name) + r"""Legendre polynomial expansion + + Reference: [https://en.wikipedia.org/wiki/Legendre_polynomials] + """ + def __init__(self, mindex=np.array([[0], [1]]), cfs=None, domain=None, name='Legendre_Poly'): + super().__init__(mindex=mindex, cfs=cfs, domain=domain, name=name) cfs_ = np.zeros(np.max(self.max_deg) + 1) poly = np.polynomial.legendre.Legendre(cfs_) @@ -105,8 +129,12 @@ def __init__(self, mindex, cfs, domain=None, name='Legendre_Poly'): class Mon(PolyBase): - def __init__(self, mindex, cfs, domain=None, name='Monomial_Poly'): - super().__init__(mindex, cfs, domain=domain, name=name) + r"""Monomial polynomial expansion + + Reference: [https://en.wikipedia.org/wiki/Monomial] + """ + def __init__(self, mindex=np.array([[0], [1]]), cfs=None, domain=None, name='Monomial_Poly'): + super().__init__(mindex=mindex, cfs=cfs, domain=domain, name=name) cfs_ = np.zeros(np.max(self.max_deg) + 1) poly = np.polynomial.polynomial.Polynomial(cfs_) diff --git a/src/pytuq/func/toy.py b/src/pytuq/func/toy.py old mode 100644 new mode 100755 index 08704cc..d52d629 --- a/src/pytuq/func/toy.py +++ b/src/pytuq/func/toy.py @@ -1,15 +1,21 @@ #!/usr/bin/env python - +""" + Toy benchmark functions module.""" import numpy as np from .func import Function class Constant(Function): - """Multioutput Constant function [REF] + r"""Multioutput Constant function + + Returns a constant vector for any input. + + .. math:: + f(x) = (c, c, \ldots, c) """ - def __init__(self, dim, const, name='Constant'): + def __init__(self, dim=1, const=np.array([1.0]), name='Constant'): super().__init__() self.setDimDom(dimension=dim) self.name = name @@ -19,19 +25,6 @@ def __init__(self, dim, const, name='Constant'): return def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=1 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - self.checkDim(x) nx = x.shape[0] @@ -45,10 +38,15 @@ def grad(self, x): class Identity(Function): - """Identity + r"""Identity function + + Returns the input unchanged. + + .. math:: + f(x) = x """ - def __init__(self, dim, name='Identity'): + def __init__(self, dim=1, name='Identity'): super().__init__() self.setDimDom(dimension=dim) self.name = name @@ -57,20 +55,7 @@ def __init__(self, dim, name='Identity'): return def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2dim - Nxd array of N points in d=1 dimensions - - Returns - ------- - numpy array, 1dim - Vector of N values - """ - - self.checkDim(x) # TODO: MAKE THIS A DECORATOR? + self.checkDim(x) return x @@ -82,6 +67,12 @@ def grad(self, x): class Quad(Function): + r"""Quadratic function. + + .. math:: + f(x) = 3 + x - x^2 + + """ def __init__(self, name='Quad'): super().__init__() self.setDimDom(dimension=1) @@ -89,19 +80,6 @@ def __init__(self, name='Quad'): self.outdim = 1 def __call__(self, x): - """Function call. 
- - Parameters - ---------- - x : numpy array, 2d - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1d - Vector of N values - """ - #: Ensure x is of the right dimensionality self.checkDim(x) @@ -115,6 +93,12 @@ def grad(self, x): class Quad2d(Function): + r"""2D Quadratic function + + .. math:: + f(x) = 3 + x_1 - x_2^2 + + """ def __init__(self, name='Quad2d'): super().__init__() self.setDimDom(dimension=2) @@ -122,19 +106,6 @@ def __init__(self, name='Quad2d'): self.outdim = 1 def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2d - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1d - Vector of N values - """ - #: Ensure x is of the right dimensionality self.checkDim(x) @@ -153,6 +124,12 @@ def grad(self, x): class Exp(Function): + r"""Exponential function with weighted input + + .. math:: + f(x) = e^{w^T x} + + """ def __init__(self, weights=[1.], name='Exp'): super().__init__() self.name = name @@ -161,19 +138,6 @@ def __init__(self, weights=[1.], name='Exp'): self.outdim = 1 def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2d - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1d - Vector of N values - """ - #: Ensure x is of the right dimensionality self.checkDim(x) @@ -190,6 +154,11 @@ def grad(self, x): class Log(Function): + r"""Logarithm function with weighted input + + .. math:: + f(x) = \log|w^T x| + """ def __init__(self, weights=[1.], name='Log'): super().__init__() self.name = name @@ -198,19 +167,6 @@ def __init__(self, weights=[1.], name='Log'): self.outdim = 1 def __call__(self, x): - """Function call. - - Parameters - ---------- - x : numpy array, 2d - Nxd array of N points in d dimensions - - Returns - ------- - numpy array, 1d - Vector of N values - """ - #: Ensure x is of the right dimensionality self.checkDim(x) diff --git a/src/pytuq/linred/klnn.py b/src/pytuq/linred/klnn.py index fc51b7f..d1b5c30 100755 --- a/src/pytuq/linred/klnn.py +++ b/src/pytuq/linred/klnn.py @@ -13,6 +13,7 @@ from quinn.nns.mlp import MLP except ImportError: print("Warning: QUiNN not installed. NN functionality won't work.") + sys.exit() myrc() diff --git a/src/pytuq/linred/klsurr.py b/src/pytuq/linred/klsurr.py index e62745c..d58032a 100755 --- a/src/pytuq/linred/klsurr.py +++ b/src/pytuq/linred/klsurr.py @@ -19,6 +19,8 @@ from quinn.nns.rnet import RNet, NonPar except ImportError: print("Warning: QUiNN not installed. 
NN functionality won't work.") + sys.exit() + myrc() diff --git a/src/pytuq/lreg/anl.py b/src/pytuq/lreg/anl.py index 0463994..11d90f5 100755 --- a/src/pytuq/lreg/anl.py +++ b/src/pytuq/lreg/anl.py @@ -94,7 +94,6 @@ def fita(self, Amat, y): # True posterior covariance if self.method == 'full': self.cf_cov = sigmahatsq*invptp - np.savetxt('covar.txt', self.cf_cov) # Variational covariance elif self.method == 'vi': self.cf_cov = np.diag(sigmahatsq/np.diag(np.dot(Amat.T, Amat)+self.cov_nugget*np.diag(np.ones((nbas,))))) diff --git a/src/pytuq/utils/xutils.py b/src/pytuq/utils/xutils.py index 24af338..2abd259 100755 --- a/src/pytuq/utils/xutils.py +++ b/src/pytuq/utils/xutils.py @@ -3,8 +3,12 @@ import sys import os +import inspect import itertools import numpy as np +import importlib.util +from pathlib import Path + try: import dill as pk except ModuleNotFoundError: @@ -286,3 +290,25 @@ def safe_cholesky(cov): assert(np.linalg.norm(cov - np.dot(lower, lower.T)) < 1.e-12) return lower + +#################################################################### +#################################################################### + +def instantiate_classes_from_module(module_name): + """ + Imports a module and returns a list of instantiated objects for every class in it. + Only classes defined inside the module are included (not imported ones). + """ + module = importlib.import_module(module_name) + objects = [] + + for name, obj in inspect.getmembers(module, inspect.isclass): + # Only include classes actually defined in this module + if obj.__module__ == module_name: + try: + instance = obj() # Try to instantiate without arguments + objects.append(instance) + except Exception as e: + print(f"Could not instantiate {name}: {e}") + + return objects