import numpy as np
from math import inf
from spotPython.fun.objectivefunctions import analytical
from spotPython.spot import spot
import matplotlib.pyplot as plt
from spotPython.utils.init import fun_control_init, get_spot_tensorboard_path
from spotPython.utils.init import fun_control_init, design_control_init, surrogate_control_init
= "09" PREFIX
14 Optimal Computational Budget Allocation in Spot
This chapter demonstrates how noisy functions can be handled with Optimal Computational Budget Allocation (OCBA) by Spot.
14.1 Example: Spot, OCBA, and the Noisy Sphere Function
14.1.1 The Objective Function: Noisy Sphere
14.1.1 The Objective Function: Noisy Sphere
The spotPython
package provides several classes of objective functions. We will use an analytical objective function with noise, i.e., a function that can be described by a (closed) formula: \[f(x) = x^2 + \epsilon\]
Since sigma is set to 0.1, noise is added to the function:
# Analytical sphere objective f(x) = x^2; sigma=0.1 adds Gaussian noise
# to each evaluation. PREFIX tags the TensorBoard log directory.
fun = analytical().fun_sphere
fun_control = fun_control_init(
    PREFIX=PREFIX,
    sigma=0.1)
Created spot_tensorboard_path: runs/spot_logs/09_maans14_2024-04-22_00-27-42 for SummaryWriter()
A plot illustrates the noise:
# Visualize the noisy sphere on [-1, 1]: the scatter around x^2 shows
# the added noise (sigma=0.1).
x = np.linspace(-1, 1, 100).reshape(-1, 1)
y = fun(x, fun_control=fun_control)
plt.figure()
plt.plot(x, y, "k")
plt.show()
Spot is adapted as follows to cope with noisy functions:
- fun_repeats is set to a value larger than 1 (here: 2),
- noise is set to True; therefore, a nugget (Lambda) term is added to the correlation matrix,
- init_size (of the design_control dictionary) is set to a value larger than 1 (here: 3).
# Spot tuner configured for a noisy objective:
# - fun_repeats=2: each infill point is evaluated twice,
# - noise=True: the Kriging surrogate gets a nugget (Lambda) term,
# - ocba_delta=1: one extra OCBA-allocated evaluation per iteration,
# - design_control: repeated initial design (init_size=3, repeats=2).
spot_1_noisy = spot.Spot(fun=fun,
                         fun_control=fun_control_init(
                             lower=np.array([-1]),
                             upper=np.array([1]),
                             fun_evals=20,
                             fun_repeats=2,
                             infill_criterion="ei",
                             noise=True,
                             tolerance_x=0.0,
                             ocba_delta=1,
                             show_models=True),
                         design_control=design_control_init(init_size=3, repeats=2),
                         surrogate_control=surrogate_control_init(noise=True))
spot_1_noisy.run()
spotPython tuning: 0.03475493366922229 [####------] 40.00%
spotPython tuning: 0.0004463018568303854 [#####-----] 50.00%
spotPython tuning: 0.0004463018568303854 [######----] 60.00%
spotPython tuning: 0.0001590474610240226 [#######---] 70.00%
spotPython tuning: 4.2454542934289965e-09 [########--] 80.00%
spotPython tuning: 2.2370853591440457e-10 [#########-] 90.00%
spotPython tuning: 2.2370853591440457e-10 [##########] 100.00% Done...
{'CHECKPOINT_PATH': 'runs/saved_models/',
'DATASET_PATH': 'data/',
'PREFIX': None,
'RESULTS_PATH': 'results/',
'TENSORBOARD_PATH': 'runs/',
'_L_in': None,
'_L_out': None,
'_torchmetric': None,
'accelerator': 'auto',
'converters': None,
'core_model': None,
'core_model_name': None,
'counter': 20,
'data': None,
'data_dir': './data',
'data_module': None,
'data_set': None,
'data_set_name': None,
'db_dict_name': None,
'design': None,
'device': None,
'devices': 1,
'enable_progress_bar': False,
'eval': None,
'fun_evals': 20,
'fun_repeats': 2,
'horizon': None,
'infill_criterion': 'ei',
'k_folds': 3,
'log_graph': False,
'log_level': 50,
'loss_function': None,
'lower': array([-1]),
'max_surrogate_points': 30,
'max_time': 1,
'metric_params': {},
'metric_river': None,
'metric_sklearn': None,
'metric_sklearn_name': None,
'metric_torch': None,
'model_dict': {},
'n_points': 1,
'n_samples': None,
'n_total': None,
'noise': True,
'num_workers': 0,
'ocba_delta': 1,
'oml_grace_period': None,
'optimizer': None,
'path': None,
'prep_model': None,
'prep_model_name': None,
'progress_file': None,
'save_model': False,
'scenario': None,
'seed': 123,
'show_batch_interval': 1000000,
'show_models': True,
'show_progress': True,
'shuffle': None,
'sigma': 0.0,
'spot_tensorboard_path': None,
'spot_writer': None,
'target_column': None,
'target_type': None,
'task': None,
'test': None,
'test_seed': 1234,
'test_size': 0.4,
'tolerance_x': 0.0,
'train': None,
'upper': array([1]),
'var_name': None,
'var_type': ['num'],
'verbosity': 0,
'weight_coeff': 0.0,
'weights': 1.0,
'weights_entry': None}
14.2 Print the Results
# Report the best objective value (min y, min mean y) and its location x0.
spot_1_noisy.print_results()
min y: 2.2370853591440457e-10
min mean y: 2.2370853591440457e-10
x0: -1.4956889245909544e-05
[['x0', -1.4956889245909544e-05]]
# Plot the optimization progress on a linear y-axis.
spot_1_noisy.plot_progress(log_y=False)
14.3 Noise and Surrogates: The Nugget Effect
14.3.1 The Noisy Sphere
14.3.1.1 The Data
We prepare some data first:
import numpy as np
import spotPython
from spotPython.fun.objectivefunctions import analytical
from spotPython.spot import spot
from spotPython.design.spacefilling import spacefilling
from spotPython.build.kriging import Kriging
import matplotlib.pyplot as plt

# One-dimensional space-filling (Latin hypercube) design generator.
gen = spacefilling(1)
rng = np.random.RandomState(1)
lower = np.array([-10])
upper = np.array([10])
# Noisy sphere: sigma=2 adds Gaussian noise to each evaluation.
fun = analytical().fun_sphere
fun_control = fun_control_init(
    sigma=2,
    seed=125)
# 10-point Latin hypercube sample in [-10, 10], evaluated with noise.
X = gen.scipy_lhd(10, lower=lower, upper=upper)
y = fun(X, fun_control=fun_control)
X_train = X.reshape(-1, 1)
y_train = y
A surrogate without nugget is fitted to these data:
# Kriging surrogate WITHOUT a nugget (noise=False): the model interpolates
# the noisy observations exactly.
S = Kriging(name='kriging',
            seed=123,
            log_level=50,
            n_theta=1,
            noise=False)
S.fit(X_train, y_train)
X_axis = np.linspace(start=-13, stop=13, num=1000).reshape(-1, 1)
mean_prediction, std_prediction, ei = S.predict(X_axis, return_val="all")
plt.scatter(X_train, y_train, label="Observations")
plt.plot(X_axis, mean_prediction, label="mue")
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Sphere: Gaussian process regression on noisy dataset")
In comparison to the surrogate without nugget, we fit a surrogate with nugget to the data:
# Kriging surrogate WITH a nugget (noise=True): a Lambda term is added to
# the correlation matrix, so the model smooths over the noise instead of
# interpolating it.
S_nug = Kriging(name='kriging',
                seed=123,
                log_level=50,
                n_theta=1,
                noise=True)
S_nug.fit(X_train, y_train)
X_axis = np.linspace(start=-13, stop=13, num=1000).reshape(-1, 1)
mean_prediction, std_prediction, ei = S_nug.predict(X_axis, return_val="all")
plt.scatter(X_train, y_train, label="Observations")
plt.plot(X_axis, mean_prediction, label="mue")
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Sphere: Gaussian process regression with nugget on noisy dataset")
The value of the nugget term can be extracted from the model as follows:
# Nugget of the model fitted with noise=False (expected to be absent/None
# for S — confirm against the Kriging implementation).
S.Lambda
# Nugget of the model fitted with noise=True; a small positive value.
S_nug.Lambda
8.374496269458742e-05
We see:
- the first model S has no nugget,
- whereas the second model S_nug has a nugget value (Lambda) larger than zero.
14.4 Exercises
14.4.1 Noisy fun_cubed
Analyse the effect of noise on the fun_cubed
function with the following settings:
# Exercise setup: noisy cubed function on [-10, 10] with sigma=10.
fun = analytical().fun_cubed
fun_control = fun_control_init(
    sigma=10,
    seed=123)
lower = np.array([-10])
upper = np.array([10])
14.4.2 fun_runge
Analyse the effect of noise on the fun_runge
function with the following settings:
# Exercise setup: noisy Runge function on [-10, 10] with sigma=0.25.
lower = np.array([-10])
upper = np.array([10])
fun = analytical().fun_runge
fun_control = fun_control_init(
    sigma=0.25,
    seed=123)
14.4.3 fun_forrester
Analyse the effect of noise on the fun_forrester
function with the following settings:
= np.array([0])
lower = np.array([1])
upper = analytical().fun_forrester
fun = {"sigma": 5,
fun_control "seed": 123}
14.4.4 fun_xsin
Analyse the effect of noise on the fun_xsin
function with the following settings:
# Exercise setup: noisy x*sin(x) function on [-1, 1] with sigma=0.5.
lower = np.array([-1.])
upper = np.array([1.])
fun = analytical().fun_xsin
fun_control = fun_control_init(
    sigma=0.5,
    seed=123)