popt.update_schemes.enopt
```python
# External imports
import numpy as np
from numpy import linalg as la
import time
import pprint

# Internal imports
from popt.misc_tools import optim_tools as ot
from popt.loop.optimize import Optimize
import popt.update_schemes.optimizers as opt


class EnOpt(Optimize):
    r"""
    This is an implementation of the steepest-descent ensemble optimization algorithm, EnOpt.
    The control variable is updated with the simple steepest (gradient) descent algorithm:

    .. math::
        x_l = x_{l-1} - \alpha \times C \times G

    where :math:`x` is the control variable, :math:`l` is the iteration index, :math:`\alpha` is the step size,
    :math:`C` is a smoothing matrix (e.g., covariance matrix for :math:`x`), and :math:`G` is the ensemble gradient.

    Methods
    -------
    calc_update()
        Update using the steepest descent method with ensemble gradient

    References
    ----------
    Chen et al., 2009, 'Efficient Ensemble-Based Closed-Loop Production Optimization', SPE Journal, 14 (4): 634-645.
    """

    def __init__(self, fun, x, args, jac, hess, bounds=None, **options):
        """
        Parameters
        ----------
        fun : callable
            Objective function

        x : ndarray
            Initial state

        args : tuple
            Initial covariance

        jac : callable
            Gradient function

        hess : callable
            Hessian function

        bounds : list, optional
            (min, max) pairs for each element in x. None is used to specify no bound.

        options : dict
            Optimization options

            - maxiter: maximum number of iterations (default 10)
            - restart: restart optimization from a restart file (default False)
            - restartsave: save a restart file after each successful iteration (default False)
            - tol: convergence tolerance for the objective function (default 1e-6)
            - alpha: step size for the steepest descent method (default 0.1)
            - beta: momentum coefficient for accelerated optimization (default 0.0)
            - alpha_maxiter: maximum number of backtracking trials (default 5)
            - resample: number of times resampling is tried if no improvement is found (default 0)
            - optimizer: 'GA' (gradient ascent), 'Adam', or 'AdaMax' (default 'GA')
            - nesterov: use Nesterov acceleration if True (default False)
            - hessian: use a Hessian approximation, if the algorithm permits it (default False)
            - normalize: normalize the gradient if True (default True)
            - cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
            - savedata: class variables to save to the result files (the state, objective
              function value, iteration number, number of function evaluations, and number
              of gradient evaluations are always saved)
        """

        # Initialize the parent class (Optimize)
        super(EnOpt, self).__init__(**options)

        def __set__variable(var_name=None, default=None):
            if var_name in options:
                return options[var_name]
            else:
                return default

        # Set input as class variables
        self.options = options  # options
        self.fun = fun          # objective function
        self.cov = args[0]      # initial covariance
        self.jac = jac          # gradient function
        self.hess = hess        # Hessian function
        self.bounds = bounds    # parameter bounds
        self.mean_state = x     # initial mean state

        # Set other optimization parameters
        self.obj_func_tol = __set__variable('tol', 1e-6)
        self.alpha = __set__variable('alpha', 0.1)
        self.alpha_cov = __set__variable('alpha_cov', 0.001)
        self.beta = __set__variable('beta', 0.0)            # stored in the optimizer class
        self.nesterov = __set__variable('nesterov', False)  # use Nesterov acceleration if True
        self.alpha_iter_max = __set__variable('alpha_maxiter', 5)
        self.max_resample = __set__variable('resample', 0)
        self.use_hessian = __set__variable('hessian', False)
        self.normalize = __set__variable('normalize', True)
        self.cov_factor = __set__variable('cov_factor', 0.5)

        # Initialize other variables
        self.state_step = 0  # state step
        self.cov_step = 0    # covariance step

        # Calculate objective function at the start point
        if not self.restart:
            self.start_time = time.perf_counter()
            self.obj_func_values = self.fun(self.mean_state, **self.epf)
            self.nfev += 1
            self.optimize_result = ot.get_optimize_result(self)
            ot.save_optimize_results(self.optimize_result)
            if self.logger is not None:
                self.logger.info('\n\n')
                self.logger.info('====== Running optimization - EnOpt ======')
                self.logger.info('\n' + pprint.pformat(self.options))
                info_str = '{:<10} {:<10} {:<15} {:<15} {:<15} '.format('iter', 'alpha_iter',
                                                                        'obj_func', 'step-size', 'cov[0,0]')
                self.logger.info(info_str)
                self.logger.info('{:<21} {:<15.4e}'.format(self.iteration, np.mean(self.obj_func_values)))

        # Initialize optimizer
        optimizer = __set__variable('optimizer', 'GA')
        if optimizer == 'GA':
            self.optimizer = opt.GradientAscent(self.alpha, self.beta)
        elif optimizer == 'Adam':
            self.optimizer = opt.Adam(self.alpha, self.beta)
        elif optimizer == 'AdaMax':
            self.normalize = False
            self.optimizer = opt.AdaMax(self.alpha, self.beta)

        # The EnOpt class starts itself, and the EnOpt class can be passed as a callable method to scipy.minimize
        self.run_loop()  # run_loop resides in the Optimize class (super)

    def calc_update(self):
        """
        Update using the steepest descent method with ensemble gradients
        """

        # Initialize variables for this step
        improvement = False
        success = False
        resampling_iter = 0

        while improvement is False:  # resampling loop

            # Shrink covariance each time we try resampling
            shrink = self.cov_factor ** resampling_iter

            # Calculate gradient
            if self.nesterov:
                gradient = self.jac(self.mean_state + self.beta * self.state_step,
                                    shrink * (self.cov + self.beta * self.cov_step), **self.epf)
            else:
                gradient = self.jac(self.mean_state, shrink * self.cov, **self.epf)
            self.njev += 1

            # Compute the Hessian
            hessian = self.hess()
            if self.use_hessian:
                inv_hessian = np.linalg.inv(hessian)
                gradient = inv_hessian @ (self.cov @ self.cov) @ gradient
                if self.normalize:
                    hessian /= np.maximum(la.norm(hessian, np.inf), 1e-12)  # scale the Hessian with its inf-norm
            elif self.normalize:
                gradient /= np.maximum(la.norm(gradient, np.inf), 1e-12)  # scale the gradient with its inf-norm
                hessian /= np.maximum(la.norm(hessian, np.inf), 1e-12)    # scale the Hessian with its inf-norm

            # Initialize for this step
            alpha_iter = 0

            while improvement is False:  # backtracking loop

                new_state, new_step = self.optimizer.apply_update(self.mean_state, gradient, iter=self.iteration)
                new_state = ot.clip_state(new_state, self.bounds)

                # Calculate new objective function
                new_func_values = self.fun(new_state, **self.epf)
                self.nfev += 1

                if np.mean(self.obj_func_values) - np.mean(new_func_values) > self.obj_func_tol:

                    # Update objective function values and state
                    self.obj_func_values = new_func_values
                    self.mean_state = new_state
                    self.state_step = new_step
                    self.alpha = self.optimizer.get_step_size()

                    # Update covariance (currently we don't apply backtracking for alpha_cov)
                    self.cov_step = self.alpha_cov * hessian + self.beta * self.cov_step
                    self.cov = self.cov - self.cov_step
                    self.cov = ot.get_sym_pos_semidef(self.cov)

                    # Write logging info
                    if self.logger is not None:
                        info_str_iter = '{:<10} {:<10} {:<15.4e} {:<15.2e} {:<15.2e}'.\
                            format(self.iteration, alpha_iter, np.mean(self.obj_func_values),
                                   self.alpha, self.cov[0, 0])
                        self.logger.info(info_str_iter)

                    # Update step size in the one-dimensional case
                    if new_state.size == 1:
                        self.optimizer.step_size /= 2

                    # Iteration was a success
                    improvement = True
                    success = True
                    self.optimizer.restore_parameters()

                    # Save variables defined in the savedata keyword
                    self.optimize_result = ot.get_optimize_result(self)
                    ot.save_optimize_results(self.optimize_result)

                    # Update iteration counter if iteration was successful, and save current state
                    self.iteration += 1

                else:

                    # If we do not have a reduction in the objective function, we reduce the step size
                    if alpha_iter < self.alpha_iter_max:
                        self.optimizer.apply_backtracking()  # decrease alpha
                        alpha_iter += 1
                    elif (resampling_iter < self.max_resample and
                          np.mean(new_func_values) - np.mean(self.obj_func_values) > 0):  # resample the gradient
                        resampling_iter += 1
                        self.optimizer.restore_parameters()
                        break
                    else:
                        success = False
                        return success

        return success
```
class EnOpt(Optimize)
This is an implementation of the steepest-descent ensemble optimization algorithm, EnOpt. The control variable is updated with the simple steepest (gradient) descent algorithm:
$$x_l = x_{l-1} - \alpha \times C \times G$$
where \( x \) is the control variable, \( l \) is the iteration index, \( \alpha \) is the step size, \( C \) is a smoothing matrix (e.g., covariance matrix for \( x \)), and \( G \) is the ensemble gradient.
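The ensemble gradient \( G \) is typically estimated from Monte Carlo samples of the control variable, as in Chen et al. (2009). Below is a minimal illustrative sketch of such an estimate, assuming an objective that takes a plain control vector; it is not the `jac` callable used by this class (which is called as `jac(state, cov, **epf)`), just the underlying idea.

```python
import numpy as np

def ensemble_gradient(fun, x, cov, ne=100, rng=None):
    """Monte Carlo (EnOpt-style) estimate of the smoothed gradient C @ G.

    Illustrative sketch only: samples controls from N(x, cov) and returns
    the sample cross-covariance between controls and objective values.
    """
    rng = np.random.default_rng() if rng is None else rng
    X = rng.multivariate_normal(x, cov, size=ne)   # perturbed control ensemble
    J = np.array([fun(xi) for xi in X])            # objective value per member
    dX = X - X.mean(axis=0)                        # control anomalies
    dJ = J - J.mean()                              # objective anomalies
    return dX.T @ dJ / (ne - 1)                    # approximates C @ G
```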
Methods
- calc_update(): update using the steepest descent method with the ensemble gradient
References
Chen et al., 2009, 'Efficient Ensemble-Based Closed-Loop Production Optimization', SPE Journal, 14 (4): 634-645.
EnOpt(fun, x, args, jac, hess, bounds=None, **options)
Parameters
- fun (callable): Objective function
- x (ndarray): Initial state
- args (tuple): Initial covariance
- jac (callable): Gradient function
- hess (callable): Hessian function
- bounds (list, optional): (min, max) pairs for each element in x. None is used to specify no bound.
- options (dict): Optimization options
  - maxiter: maximum number of iterations (default 10)
  - restart: restart optimization from a restart file (default False)
  - restartsave: save a restart file after each successful iteration (default False)
  - tol: convergence tolerance for the objective function (default 1e-6)
  - alpha: step size for the steepest descent method (default 0.1)
  - beta: momentum coefficient for accelerated optimization (default 0.0)
  - alpha_maxiter: maximum number of backtracking trials (default 5)
  - resample: number of times resampling is tried if no improvement is found (default 0)
  - optimizer: 'GA' (gradient ascent), 'Adam', or 'AdaMax' (default 'GA')
  - nesterov: use Nesterov acceleration if True (default False)
  - hessian: use a Hessian approximation, if the algorithm permits it (default False)
  - normalize: normalize the gradient if True (default True)
  - cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
  - savedata: class variables to save to the result files (the state, objective function value, iteration number, number of function evaluations, and number of gradient evaluations are always saved)
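A minimal usage sketch follows. The quadratic test problem and its callables are hypothetical (not part of popt); their signatures are inferred from how EnOpt invokes them in the source above (`fun(state, **epf)`, `jac(state, cov, **epf)`, `hess()`), and it is assumed the default options of the parent Optimize class suffice. Note that constructing EnOpt starts the optimization immediately, since `__init__` ends with `run_loop()`.

```python
import numpy as np

# Hypothetical quadratic test problem (for illustration only)
def fun(state, **epf):
    return np.sum(state**2)        # objective value

def jac(state, cov, **epf):
    return 2.0 * state             # gradient at the mean state

def hess(**epf):
    return 2.0 * np.eye(2)         # constant Hessian approximation

x0 = np.array([1.0, -0.5])         # initial mean state
cov0 = 0.1 * np.eye(2)             # initial covariance, passed via args

# Constructing the object runs the optimization loop right away.
run = EnOpt(fun, x0, (cov0,), jac, hess,
            bounds=[(-2.0, 2.0)] * 2,
            maxiter=20, alpha=0.1, optimizer='GA')
```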
calc_update(self)
Update using the steepest descent method with ensemble gradients.
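On a successful iteration, calc_update also adapts the covariance with a momentum-style step built from the (normalized) Hessian approximation \( H \), in the notation of the update formula above:

$$\Delta C_l = \alpha_{\mathrm{cov}} H + \beta \, \Delta C_{l-1}, \qquad C_l = C_{l-1} - \Delta C_l$$

after which \( C_l \) is projected back to a symmetric positive semi-definite matrix (see ot.get_sym_pos_semidef in the source above). If no improvement is found, the optimizer first backtracks on the step size up to alpha_maxiter times, and then, if permitted by the resample option, resamples the gradient with a covariance shrunk by cov_factor per trial.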