Skip to content

hypertorch

HyperTorch

Hyperparameter Tuning for Torch.

Parameters:

Name Type Description Default
seed int

seed for random number generator. See Numpy Random Sampling

126
log_level int

log level for logger. Default is 50.

50

Attributes:

Name Type Description
seed int

seed for random number generator.

rng Generator

random number generator.

fun_control dict

dictionary containing control parameters for the function.

log_level int

log level for logger.

Source code in spotPython/fun/hypertorch.py
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
class HyperTorch:
    """
    Hyperparameter Tuning for Torch.

    Args:
        seed (int): seed for the random number generator.
            See Numpy Random Sampling.
        log_level (int): log level for the logger. Default is 50.

    Attributes:
        seed (int): seed for random number generator.
        rng (Generator): random number generator.
        fun_control (dict): dictionary containing control parameters for the function.
        log_level (int): log level for logger.
    """

    def __init__(self, seed: int = 126, log_level: int = 50):
        self.seed = seed
        self.rng = default_rng(seed=self.seed)
        # Default control parameters. Callers typically overwrite/extend these
        # via the fun_control argument of fun_torch().
        self.fun_control = {
            "seed": None,
            "data": None,
            "step": 10_000,
            "horizon": None,
            "grace_period": None,
            "metric_river": None,
            "metric_sklearn": None,
            "weights": array([1, 0, 0]),
            "weight_coeff": 0.0,
            "log_level": log_level,
            "var_name": [],
            "var_type": [],
        }
        self.log_level = self.fun_control["log_level"]
        logger.setLevel(self.log_level)
        logger.info(f"Starting the logger at level {self.log_level} for module {__name__}:")

    def check_X_shape(self, X: np.ndarray) -> None:
        """
        Check the shape of the input array X.

        A 1-dim input is first wrapped into a 2-dim array with a single row,
        then the number of columns is compared against var_name.

        Args:
            X (np.ndarray): input array.

        Raises:
            Exception: if the second dimension of X does not match the length of var_name in fun_control.

        Examples:
            >>> from spotPython.fun.hypertorch import HyperTorch
            >>> import numpy as np
            >>> hyper_torch = HyperTorch(seed=126, log_level=50)
            >>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
            >>> hyper_torch.check_X_shape(np.array([[1, 2], [3, 4]]))
            >>> hyper_torch.check_X_shape(np.array([1, 2, 3]))
            Traceback (most recent call last):
            ...
            Exception

        """
        try:
            X.shape[1]
        except IndexError:
            # BUGFIX: a 1-dim ndarray's shape tuple has no index 1 and raises
            # IndexError (not ValueError, which the original caught and which
            # therefore never fired). Wrap the 1-dim input into a single row.
            X = np.array([X])
        if X.shape[1] != len(self.fun_control["var_name"]):
            raise Exception

    def fun_torch(self, X: np.ndarray, fun_control: dict = None) -> np.ndarray:
        """
        Function to be optimized.

        Evaluates one model configuration per row of X (via cross-validation
        or hold-out, selected by fun_control["eval"]) and returns the weighted
        evaluation results.

        Args:
            X (np.ndarray): input array; each row is one hyperparameter configuration.
            fun_control (dict): dictionary containing control parameters for the
                function. If provided, it is merged into self.fun_control.
        Returns:
            np.ndarray: output array, one weighted loss value per configuration.

        Examples:
            >>> from spotPython.fun.hypertorch import HyperTorch
            >>> import numpy as np
            >>> hyper_torch = HyperTorch(seed=126, log_level=50)
            >>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
            >>> hyper_torch.fun_torch(np.array([[1, 2], [3, 4]]))

        """
        z_res = np.array([], dtype=float)
        # BUGFIX: fun_control defaults to None; dict.update(None) raises
        # TypeError. Only merge when a dict was actually passed. All later
        # lookups go through self.fun_control so the merged values are used
        # consistently (the original mixed fun_control[...] and
        # self.fun_control[...], which also broke for fun_control=None).
        if fun_control is not None:
            self.fun_control.update(fun_control)
        self.check_X_shape(X)
        var_dict = assign_values(X, self.fun_control["var_name"])
        for config in generate_one_config_from_var_dict(var_dict, self.fun_control):
            print(f"\nconfig: {config}")
            config_id = generate_config_id(config)
            # Optionally wrap the core model with a preprocessing model.
            if self.fun_control["prep_model"] is not None:
                model = make_pipeline(self.fun_control["prep_model"], self.fun_control["core_model"](**config))
            else:
                model = self.fun_control["core_model"](**config)
            try:
                if self.fun_control["eval"] == "train_cv":
                    df_eval, _ = evaluate_cv(
                        model,
                        dataset=self.fun_control["train"],
                        shuffle=self.fun_control["shuffle"],
                        device=self.fun_control["device"],
                        show_batch_interval=self.fun_control["show_batch_interval"],
                        task=self.fun_control["task"],
                        writer=self.fun_control["spot_writer"],
                        writerId=config_id,
                    )
                elif self.fun_control["eval"] == "test_cv":
                    df_eval, _ = evaluate_cv(
                        model,
                        dataset=self.fun_control["test"],
                        shuffle=self.fun_control["shuffle"],
                        device=self.fun_control["device"],
                        show_batch_interval=self.fun_control["show_batch_interval"],
                        task=self.fun_control["task"],
                        writer=self.fun_control["spot_writer"],
                        writerId=config_id,
                    )
                elif self.fun_control["eval"] == "test_hold_out":
                    df_eval, _ = evaluate_hold_out(
                        model,
                        train_dataset=self.fun_control["train"],
                        shuffle=self.fun_control["shuffle"],
                        loss_function=self.fun_control["loss_function"],
                        metric=self.fun_control["metric_torch"],
                        test_dataset=self.fun_control["test"],
                        device=self.fun_control["device"],
                        show_batch_interval=self.fun_control["show_batch_interval"],
                        path=self.fun_control["path"],
                        task=self.fun_control["task"],
                        writer=self.fun_control["spot_writer"],
                        writerId=config_id,
                    )
                else:  # eval == "train_hold_out"
                    df_eval, _ = evaluate_hold_out(
                        model,
                        train_dataset=self.fun_control["train"],
                        shuffle=self.fun_control["shuffle"],
                        loss_function=self.fun_control["loss_function"],
                        metric=self.fun_control["metric_torch"],
                        device=self.fun_control["device"],
                        show_batch_interval=self.fun_control["show_batch_interval"],
                        path=self.fun_control["path"],
                        task=self.fun_control["task"],
                        writer=self.fun_control["spot_writer"],
                        writerId=config_id,
                    )
            except Exception as err:
                # Best-effort: a failed evaluation yields NaN instead of
                # aborting the whole tuning run.
                print(f"Error in fun_torch(). Call to evaluate_model failed. {err=}, {type(err)=}")
                print("Setting df_eval to np.nan")
                df_eval = np.nan
            z_val = self.fun_control["weights"] * df_eval
            if self.fun_control["spot_writer"] is not None:
                writer = self.fun_control["spot_writer"]
                writer.add_hparams(config, {"fun_torch: loss": z_val})
                writer.flush()
            z_res = np.append(z_res, z_val)
        return z_res

check_X_shape(X)

Check the shape of the input array X.

Parameters:

Name Type Description Default
X ndarray

input array.

required

Raises:

Type Description
Exception

if the second dimension of X does not match the length of var_name in fun_control.

Examples:

>>> from spotPython.fun.hypertorch import HyperTorch
>>> import numpy as np
>>> hyper_torch = HyperTorch(seed=126, log_level=50)
>>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
>>> hyper_torch.check_X_shape(np.array([[1, 2], [3, 4]]))
>>> hyper_torch.check_X_shape(np.array([1, 2]))
Traceback (most recent call last):
...
Exception
Source code in spotPython/fun/hypertorch.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
def check_X_shape(self, X: np.ndarray) -> None:
    """
    Check the shape of the input array X.

    A 1-dim input is first wrapped into a 2-dim array with a single row,
    then the number of columns is compared against var_name.

    Args:
        X (np.ndarray): input array.

    Raises:
        Exception: if the second dimension of X does not match the length of var_name in fun_control.

    Examples:
        >>> from spotPython.fun.hypertorch import HyperTorch
        >>> import numpy as np
        >>> hyper_torch = HyperTorch(seed=126, log_level=50)
        >>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
        >>> hyper_torch.check_X_shape(np.array([[1, 2], [3, 4]]))
        >>> hyper_torch.check_X_shape(np.array([1, 2, 3]))
        Traceback (most recent call last):
        ...
        Exception

    """
    try:
        X.shape[1]
    except IndexError:
        # BUGFIX: a 1-dim ndarray's shape tuple has no index 1 and raises
        # IndexError (not ValueError, which the original caught and which
        # therefore never fired). Wrap the 1-dim input into a single row.
        X = np.array([X])
    if X.shape[1] != len(self.fun_control["var_name"]):
        raise Exception

fun_torch(X, fun_control=None)

Function to be optimized.

Parameters:

Name Type Description Default
X ndarray

input array.

required
fun_control dict

dictionary containing control parameters for the function.

None

Returns: np.ndarray: output array.

Examples:

>>> from spotPython.fun.hypertorch import HyperTorch
>>> import numpy as np
>>> hyper_torch = HyperTorch(seed=126, log_level=50)
>>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
>>> hyper_torch.fun_torch(np.array([[1, 2], [3, 4]]))
Source code in spotPython/fun/hypertorch.py
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
def fun_torch(self, X: np.ndarray, fun_control: dict = None) -> np.ndarray:
    """
    Function to be optimized.

    Evaluates one model configuration per row of X (via cross-validation
    or hold-out, selected by fun_control["eval"]) and returns the weighted
    evaluation results.

    Args:
        X (np.ndarray): input array; each row is one hyperparameter configuration.
        fun_control (dict): dictionary containing control parameters for the
            function. If provided, it is merged into self.fun_control.
    Returns:
        np.ndarray: output array, one weighted loss value per configuration.

    Examples:
        >>> from spotPython.fun.hypertorch import HyperTorch
        >>> import numpy as np
        >>> hyper_torch = HyperTorch(seed=126, log_level=50)
        >>> hyper_torch.fun_control["var_name"] = ["x1", "x2"]
        >>> hyper_torch.fun_torch(np.array([[1, 2], [3, 4]]))

    """
    z_res = np.array([], dtype=float)
    # BUGFIX: fun_control defaults to None; dict.update(None) raises
    # TypeError. Only merge when a dict was actually passed. All later
    # lookups go through self.fun_control so the merged values are used
    # consistently (the original mixed fun_control[...] and
    # self.fun_control[...], which also broke for fun_control=None).
    if fun_control is not None:
        self.fun_control.update(fun_control)
    self.check_X_shape(X)
    var_dict = assign_values(X, self.fun_control["var_name"])
    for config in generate_one_config_from_var_dict(var_dict, self.fun_control):
        print(f"\nconfig: {config}")
        config_id = generate_config_id(config)
        # Optionally wrap the core model with a preprocessing model.
        if self.fun_control["prep_model"] is not None:
            model = make_pipeline(self.fun_control["prep_model"], self.fun_control["core_model"](**config))
        else:
            model = self.fun_control["core_model"](**config)
        try:
            if self.fun_control["eval"] == "train_cv":
                df_eval, _ = evaluate_cv(
                    model,
                    dataset=self.fun_control["train"],
                    shuffle=self.fun_control["shuffle"],
                    device=self.fun_control["device"],
                    show_batch_interval=self.fun_control["show_batch_interval"],
                    task=self.fun_control["task"],
                    writer=self.fun_control["spot_writer"],
                    writerId=config_id,
                )
            elif self.fun_control["eval"] == "test_cv":
                df_eval, _ = evaluate_cv(
                    model,
                    dataset=self.fun_control["test"],
                    shuffle=self.fun_control["shuffle"],
                    device=self.fun_control["device"],
                    show_batch_interval=self.fun_control["show_batch_interval"],
                    task=self.fun_control["task"],
                    writer=self.fun_control["spot_writer"],
                    writerId=config_id,
                )
            elif self.fun_control["eval"] == "test_hold_out":
                df_eval, _ = evaluate_hold_out(
                    model,
                    train_dataset=self.fun_control["train"],
                    shuffle=self.fun_control["shuffle"],
                    loss_function=self.fun_control["loss_function"],
                    metric=self.fun_control["metric_torch"],
                    test_dataset=self.fun_control["test"],
                    device=self.fun_control["device"],
                    show_batch_interval=self.fun_control["show_batch_interval"],
                    path=self.fun_control["path"],
                    task=self.fun_control["task"],
                    writer=self.fun_control["spot_writer"],
                    writerId=config_id,
                )
            else:  # eval == "train_hold_out"
                df_eval, _ = evaluate_hold_out(
                    model,
                    train_dataset=self.fun_control["train"],
                    shuffle=self.fun_control["shuffle"],
                    loss_function=self.fun_control["loss_function"],
                    metric=self.fun_control["metric_torch"],
                    device=self.fun_control["device"],
                    show_batch_interval=self.fun_control["show_batch_interval"],
                    path=self.fun_control["path"],
                    task=self.fun_control["task"],
                    writer=self.fun_control["spot_writer"],
                    writerId=config_id,
                )
        except Exception as err:
            # Best-effort: a failed evaluation yields NaN instead of
            # aborting the whole tuning run.
            print(f"Error in fun_torch(). Call to evaluate_model failed. {err=}, {type(err)=}")
            print("Setting df_eval to np.nan")
            df_eval = np.nan
        z_val = self.fun_control["weights"] * df_eval
        if self.fun_control["spot_writer"] is not None:
            writer = self.fun_control["spot_writer"]
            writer.add_hparams(config, {"fun_torch: loss": z_val})
            writer.flush()
        z_res = np.append(z_res, z_val)
    return z_res