popt.update_schemes.smcopt

# External imports
import numpy as np
import time
import pprint

# Internal imports
from popt.loop.optimize import Optimize
import popt.update_schemes.optimizers as opt
from popt.misc_tools import optim_tools as ot


class SmcOpt(Optimize):
    """
    TODO: Write docstring in the style of EnOpt
    """
    def __init__(self, fun, x, args, sens, bounds=None, **options):
        """
        Parameters
        ----------
        fun: callable
            Objective function

        x: ndarray
            Initial state

        args: tuple
            Additional arguments; the covariance matrix used for sampling is passed as args[0]

        sens: callable
            Ensemble sensitivity

        bounds: list, optional
            (min, max) pairs for each element in x. None is used to specify no bound.

        options: dict
            Optimization options

            - maxiter: maximum number of iterations (default 10)
            - restart: restart optimization from a restart file (default false)
            - restartsave: save a restart file after each successful iteration (default false)
            - tol: convergence tolerance for the objective function (default 1e-6)
            - alpha: weight between previous and new step (default 0.1)
            - alpha_maxiter: maximum number of backtracking trials (default 5)
            - resample: number of resampling trials attempted if no improvement is found (default 0)
            - cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
            - inflation_factor: term used to weight down the prior influence (default 1)
            - survival_factor: fraction of surviving samples (default 0)
            - savedata: specify which class variables to save to the result files (state, objective
                        function value, iteration number, number of function evaluations, and number
                        of gradient evaluations are always saved)
        """

        # init PETEnsemble
        super(SmcOpt, self).__init__(**options)

        def __set_variable(var_name=None, default=None):
            if var_name in options:
                return options[var_name]
            else:
                return default

        # Set input as class variables
        self.options = options  # options
        self.fun = fun  # objective function
        self.sens = sens  # gradient function
        self.bounds = bounds  # parameter bounds
        self.mean_state = x  # initial mean state
        self.best_state = None  # best ensemble member
        self.cov = args[0]  # covariance matrix for sampling

        # Set other optimization parameters
        self.obj_func_tol = __set_variable('tol', 1e-6)
        self.alpha = __set_variable('alpha', 0.1)
        self.alpha_iter_max = __set_variable('alpha_maxiter', 5)
        self.max_resample = __set_variable('resample', 0)
        self.cov_factor = __set_variable('cov_factor', 0.5)
        self.inflation_factor = __set_variable('inflation_factor', 1)
        self.survival_factor = __set_variable('survival_factor', 0)

        # Calculate the objective function of the start point
        if not self.restart:
            self.start_time = time.perf_counter()
            self.obj_func_values = self.fun(self.mean_state)
            self.best_func = np.mean(self.obj_func_values)
            self.nfev += 1
            self.optimize_result = ot.get_optimize_result(self)
            ot.save_optimize_results(self.optimize_result)
            if self.logger is not None:
                self.logger.info('       ====== Running optimization - SmcOpt ======')
                self.logger.info('\n' + pprint.pformat(self.options))
                info_str = '       {:<10} {:<10} {:<15} {:<15} '.format('iter', 'alpha_iter',
                                                                        'obj_func', 'step-size')
                self.logger.info(info_str)
                self.logger.info('       {:<21} {:<15.4e}'.format(self.iteration, np.mean(self.obj_func_values)))

        self.optimizer = opt.GradientAscent(self.alpha, 0)

        # The SmcOpt class self-ignites: run_loop (inherited from Optimize) starts immediately
        self.run_loop()

    def calc_update(self):
        """
        Update using the sequential Monte Carlo method
        """

        improvement = False
        success = False
        resampling_iter = 0
        inflate = 2 * (self.inflation_factor + self.iteration)

        while improvement is False:  # resampling loop

            # Shrink the covariance for each resampling trial
            shrink = self.cov_factor ** resampling_iter

            # Calculate the ensemble sensitivity
            (sens_matrix, self.best_state, best_func_tmp) = self.sens(self.mean_state, inflate,
                                                                      shrink * self.cov, self.survival_factor)
            self.njev += 1

            # Initialize for this step
            alpha_iter = 0

            while improvement is False:  # backtracking loop

                search_direction = sens_matrix
                new_state = self.optimizer.apply_smc_update(self.mean_state, search_direction, iter=self.iteration)
                new_state = ot.clip_state(new_state, self.bounds)

                # Calculate the new objective function
                new_func_values = self.fun(new_state)
                self.nfev += 1

                if np.mean(self.obj_func_values) - np.mean(new_func_values) > self.obj_func_tol or \
                        (self.best_func - best_func_tmp) > self.obj_func_tol:

                    # Update the objective function values and the state
                    self.obj_func_values = new_func_values
                    self.mean_state = new_state
                    if (self.best_func - best_func_tmp) > self.obj_func_tol:
                        self.best_func = best_func_tmp

                    # Write logging info
                    if self.logger is not None:
                        info_str_iter = '       {:<10} {:<10} {:<15.4e} {:<15.2e}'. \
                            format(self.iteration, alpha_iter, self.best_func, self.alpha)
                        self.logger.info(info_str_iter)

                    # Iteration was a success
                    improvement = True
                    success = True
                    self.optimizer.restore_parameters()

                    # Save the variables defined in the savedata keyword
                    self.optimize_result = ot.get_optimize_result(self)
                    ot.save_optimize_results(self.optimize_result)

                    # Update the iteration counter after a successful iteration
                    self.iteration += 1

                else:

                    # No improvement: first backtrack on the step size, then try
                    # resampling with a shrunk covariance, and finally give up
                    if alpha_iter < self.alpha_iter_max:
                        self.optimizer.apply_backtracking()  # decrease alpha
                        alpha_iter += 1
                    elif (resampling_iter < self.max_resample and
                          np.mean(new_func_values) - np.mean(self.obj_func_values) > 0):
                        resampling_iter += 1  # recompute the sensitivity with a shrunk covariance
                        self.optimizer.restore_parameters()
                        break
                    else:
                        success = False
                        return success

        return success
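
A minimal usage sketch, assuming a working popt installation. The toy functions below are hypothetical stand-ins: fun must accept a state vector and return values that np.mean can reduce, and sens must follow the call signature used in calc_update above, returning the tuple (sens_matrix, best_state, best_func). The weighting inside toy_sensitivity is purely illustrative, not the scheme implemented by popt's sensitivity routines. Note that the constructor "self-ignites", so building the object runs the whole optimization loop.

import numpy as np
from popt.update_schemes.smcopt import SmcOpt

dim = 2
rng = np.random.default_rng(0)

def toy_objective(state):
    # Hypothetical objective: squared distance to the origin (minimized)
    return np.atleast_1d(np.sum(np.asarray(state) ** 2))

def toy_sensitivity(mean_state, inflate, cov, survival_factor):
    # Hypothetical ensemble sensitivity matching the call signature in calc_update:
    # sample around the mean, weight the samples by objective value, and return the
    # weighted mean together with the best member and its objective value.
    # (survival_factor is accepted but unused in this toy version.)
    ne = 50
    ensemble = rng.multivariate_normal(mean_state, cov, size=ne)
    values = np.array([toy_objective(member).item() for member in ensemble])
    weights = np.exp(-inflate * (values - values.min()))
    weights /= weights.sum()
    best = np.argmin(values)
    return weights @ ensemble, ensemble[best], values[best]

x0 = np.array([1.0, -1.0])
cov0 = 0.1 * np.eye(dim)        # passed as args[0]
bounds = [(-2.0, 2.0)] * dim

# The constructor runs the optimization loop immediately
smc = SmcOpt(toy_objective, x0, (cov0,), toy_sensitivity,
             bounds=bounds, maxiter=10, alpha=0.1, resample=2)
print(smc.mean_state, smc.best_func)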
class SmcOpt(popt.loop.optimize.Optimize)

TODO: Write docstring in the style of EnOpt

SmcOpt(fun, x, args, sens, bounds=None, **options)
Parameters
  • fun (callable): Objective function
  • x (ndarray): Initial state
  • args (tuple): Additional arguments; the covariance matrix used for sampling is passed as args[0]
  • sens (callable): Ensemble sensitivity
  • bounds (list, optional): (min, max) pairs for each element in x. None is used to specify no bound.
  • options (dict): Optimization options (see the example dict below)

    • maxiter: maximum number of iterations (default 10)
    • restart: restart optimization from a restart file (default false)
    • restartsave: save a restart file after each successful iteration (default false)
    • tol: convergence tolerance for the objective function (default 1e-6)
    • alpha: weight between previous and new step (default 0.1)
    • alpha_maxiter: maximum number of backtracking trials (default 5)
    • resample: number of resampling trials attempted if no improvement is found (default 0)
    • cov_factor: factor used to shrink the covariance for each resampling trial (default 0.5)
    • inflation_factor: term used to weight down the prior influence (default 1)
    • survival_factor: fraction of surviving samples (default 0)
    • savedata: specify which class variables to save to the result files (state, objective function value, iteration number, number of function evaluations, and number of gradient evaluations are always saved)
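
For illustration, the options above can be collected in a plain dict and unpacked into the constructor; the values below are hypothetical, not recommendations:

options = {
    'maxiter': 20,            # stop after 20 iterations
    'tol': 1e-6,              # improvement threshold on the objective
    'alpha': 0.1,             # weight between previous and new step
    'alpha_maxiter': 5,       # backtracking trials per iteration
    'resample': 2,            # resampling trials if no improvement is found
    'cov_factor': 0.5,        # covariance shrink per resampling trial
    'inflation_factor': 1,    # weights down the prior influence
    'survival_factor': 0.0,   # fraction of surviving samples
}

# Unpacked as keyword arguments (see the usage sketch after the source listing):
# SmcOpt(fun, x, (cov,), sens, bounds=bounds, **options)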
Attributes
  • options: optimization options passed to the constructor
  • fun: objective function
  • sens: gradient (ensemble sensitivity) function
  • bounds: parameter bounds
  • mean_state: current mean state (initialized to x)
  • best_state: best ensemble member
  • cov: covariance matrix for sampling (args[0])
  • obj_func_tol: convergence tolerance ('tol' option)
  • alpha: step size ('alpha' option)
  • alpha_iter_max: maximum number of backtracking trials ('alpha_maxiter' option)
  • max_resample: maximum number of resampling trials ('resample' option)
  • cov_factor: factor used to shrink the covariance per resampling trial
  • inflation_factor: term used to weight down the prior influence
  • survival_factor: fraction of surviving samples
  • optimizer: GradientAscent optimizer used for the updates
def calc_update(self):

Update using the sequential Monte Carlo method. Each call computes the ensemble sensitivity at the current mean state, applies the SMC update, and on failure backtracks on the step size (up to alpha_maxiter trials) or resamples with a shrunk covariance (up to resample trials) until either the mean objective value or the best ensemble member improves by more than tol. Returns True on success and False otherwise; the schedule sketch below illustrates the shrink and inflation terms.
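
A small illustration of the two schedules used in calc_update with the default factors: the covariance shrink is geometric in the resampling trial, while the inflation term grows linearly with the iteration count.

cov_factor, inflation_factor = 0.5, 1

# Covariance shrink factor applied at resampling trial k: cov_factor ** k
for k in range(3):
    print(k, cov_factor ** k)             # 1.0, 0.5, 0.25

# Inflation term at iteration i: 2 * (inflation_factor + i)
for i in range(3):
    print(i, 2 * (inflation_factor + i))  # 2, 4, 6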