pipt.misc_tools.qaqc_tools

Quality Assurance of the forecast (QA) and analysis (QC) step.

  1"""Quality Assurance of the forecast (QA) and analysis (QC) step."""
  2import numpy as np
  3import os
  4# import matplotlib as mpl
  5# mpl.use('Qt5Agg')
  6import matplotlib.pyplot as plt
  7import matplotlib.patches as pat
  8import matplotlib.collections as mcoll
  9from matplotlib.colors import ListedColormap
 10import itertools
 11import logging
 12from pipt.misc_tools import cov_regularization
 13from scipy.interpolate import interp1d
 14from scipy.io import loadmat
 15# import cv2
 16
 17
 18# Define the class for qa/qc tools.
 19class QAQC:
 20    """
 21     Perform Quality Assurance of the forecast (QA) and analysis (QC) step.
 22     Available functions (developed in 4DSEIS project and not available yet):
 23
 24        - `calc_coverage`: check forecast data coverage
 25        - `calc_mahalanobis`: evaluate "higher-order" data coverage
  26        - `calc_kg`: check/write individual gain for parameters;
  27          flag data which have conflicting updates
 28        - `calc_da_stat`: compute statistics for updated parameters
 29
 30     Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
 31     """
 32
 33    # Initialize
 34    def __init__(self, keys, obs_data, datavar, logger=None, prior_info=None, sim=None, ini_state=None):
 35        self.keys = keys  # input info for the case
 36        self.obs_data = obs_data  # observed (real) data
 37        self.datavar = datavar  # data variance
  38        if logger is None:  # define a logger to print output
 39            logging.basicConfig(level=logging.INFO,
 40                                filename='qaqc_logger.log',
 41                                filemode='a',
 42                                format='%(asctime)s : %(levelname)s : %(name)s : %(message)s')
 43            self.logger = logging.getLogger('QAQC')
 44        else:
 45            self.logger = logging.getLogger('PET.PIPT.QCQA')
 46        self.prior_info = prior_info  # prior info for the different parameter types
 47        # this class contains potential writing functions (this class can be saved to debug_analysis)
 48        self.sim = sim
 49        self.ini_state = ini_state  # the first state; used to compute statistics
 50        self.ne = 0
 51        if self.ini_state is not None:
 52            # get the ensemble size from here
 53            self.ne = self.ini_state[list(self.ini_state.keys())[0]].shape[1]
 54
  55        assim_step = 0  # Assume simultaneous assimilation
 56        assim_ind = [keys['obsname'], keys['assimindex'][assim_step]]
 57        if isinstance(assim_ind[1], list):  # Check if prim. ind. is a list
 58            self.l_prim = [int(x) for x in assim_ind[1]]
 59        else:  # Float
 60            self.l_prim = [int(assim_ind[1])]
 61
 62        self.data_types = list(obs_data[0].keys())  # All data types
 63        self.en_obs = {}
 64        self.en_obs_vec = {}
 65        self.en_time = {}
 66        self.en_time_vec = {}
 67        for typ in self.data_types:
 68            self.en_obs[typ] = np.array(
 69                [self.obs_data[ind][typ].flatten() for ind in self.l_prim if self.obs_data[ind][typ]
 70                 is not None and self.obs_data[ind][typ].shape == (1,)])
 71            l = [self.obs_data[ind][typ].flatten() for ind in self.l_prim if self.obs_data[ind][typ] is not None
 72                 and self.obs_data[ind][typ].shape[0] > 1]
 73            if l:
 74                self.en_obs_vec[typ] = np.expand_dims(np.concatenate(l), 1)
 75            self.en_time[typ] = [ind for ind in self.l_prim if self.obs_data[ind][typ]
 76                                 is not None and self.obs_data[ind][typ].shape == (1,)]
 77            l = [ind for ind in self.l_prim if self.obs_data[ind][typ]
 78                 is not None and self.obs_data[ind][typ].shape[0] > 1]
 79            if l:
 80                self.en_time_vec[typ] = l
 81
 82        # Check if the QA folder is generated
 83        self.folder = 'QAQC' + os.sep
 84        if not os.path.exists(self.folder):
  85            os.mkdir(self.folder)  # if not, create it
 86
 87        if 'localization' in self.keys:
 88            self.localization = cov_regularization.localization(self.keys['localization'],
 89                                                                self.keys['truedataindex'],
 90                                                                self.keys['datatype'],
 91                                                                self.keys['staticvar'],
 92                                                                self.ne)
 93        self.pred_data = None
 94        self.state = None
 95        self.en_fcst = {}
 96        self.en_fcst_vec = {}
 97        self.lam = None
 98
 99    # Set the predicted data and current state
100    def set(self, pred_data, state=None, lam=None):
101        self.pred_data = pred_data
102        for typ in self.data_types:
103            self.en_fcst[typ] = np.array(
104                [self.pred_data[ind][typ].flatten() for ind in self.l_prim if self.obs_data[ind][typ]
105                 is not None and self.obs_data[ind][typ].shape == (1,)])
106            l = [self.pred_data[ind][typ] for ind in self.l_prim if self.obs_data[ind][typ] is not None
107                 and self.obs_data[ind][typ].shape[0] > 1]
108            if l:
109                self.en_fcst_vec[typ] = np.concatenate(l)
110        self.state = state
111        self.lam = lam
112
113    def calc_coverage(self, line=None):
114        """
115        Calculate the data coverage for production and seismic data. For seismic data, the plotting is based on the
116        importance-scaled coverage developed by Espen O. Lie from GeoCore.
117
118        Parameters
119        ----------
120        line : array-like, optional
121            If not None, plot 1D coverage.
122
123        Notes
124        -----
125        - Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
126        - NOTE: Not available in current version of PIPT
127        """
128
129    def calc_kg(self, options=None):
130        """
131        Check/write individual gain for parameters.
132        Note: the gain is formed as the ES gain with an identity Cd; this can be improved.
133
134        Visualization of many of these parameters is problem-specific. In reservoir simulation cases it is necessary
135        to write them to the simulation grid, while other applications may call for different visualizations. Hence,
136        the method also depends on a simulator-specific writer.
137
138        Parameters
139        ----------
140        options : dict
141            Settings for the Kalman gain computations.
142            - 'num_store' : int, optional
143                Number of elements to store. Default is 10.
144            - 'unique_time' : bool, optional
145                Calculate for each time instance. Default is False.
146            - 'plot_all_kg' : bool, optional
147                Plot all the Kalman gains for the field parameters. If False, plot the num_store. Default is False.
148            - 'only_log' : bool, optional
149                Only write to the logger; no plotting. Default is True.
150            - 'auto_ada_loc' : bool, optional
151                Use localization in computations. Default is True.
152            - 'write_to_resinsight' : bool, optional
153                Pipe results to ResInsight. Default is False. (Note: this requires that ResInsight is open on the computer.)
154
155        Notes
156        -----
157        - Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
158        - NOTE: Not available in current version of PIPT
159        """
160
161    def calc_mahalanobis(self, combi_list=(1, None)):
162        """
163        Calculate the Mahalanobis distance as described in "Oliver, D. S. (2020). Diagnosing reservoir model deficiency
164        for model improvement. Journal of Petroleum Science and Engineering, 193(February).
165        https://doi.org/10.1016/j.petrol.2020.107367"
166
167        Parameters
168        ----------
169        combi_list : list
170            List of levels and possible combinations of datatypes. The list must be given as a tuple with pairs:
171            - level : int
172                Defines which level. Default is 1.
173            - combi_typ : str
174                Defines how data are combined. Default is no combine.
175
176        Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
177        NOTE: Not available in current version of PIPT
178        """
179
180    def calc_da_stat(self, options=None):
181        """
182        Calculate statistics for the updated parameters. The percentage of parameters with updates larger than one,
183        two and three standard deviations (calculated from the initial ensemble) is flagged.
184
185        Parameters
186        ----------
187        options : dict
188            Settings for statistics.
189            write_to_file : bool, optional
190                Whether to write results to a .grdecl file. Defaults to False.
191
192        Notes
193        -----
194        - Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
195        - NOTE: Not available in current version of PIPT
196        """
class QAQC:

Perform Quality Assurance of the forecast (QA) and analysis (QC) step. Available functions (developed in the 4DSEIS project and not yet available):

  • calc_coverage: check forecast data coverage
  • calc_mahalanobis: evaluate "higher-order" data coverage
  • calc_kg: check/write individual gain for parameters; flag data which have conflicting updates
  • calc_da_stat: compute statistics for updated parameters

Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS

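A minimal usage sketch (the data-type name, values and parameter name below are hypothetical; the layout of keys, obs_data and pred_data follows the __init__ and set source above):

    import numpy as np
    from pipt.misc_tools.qaqc_tools import QAQC

    ne = 10  # ensemble size

    # one dict per assimilation index; scalar data stored as shape-(1,) arrays
    obs_data = [{'wopr a-1': np.array([250.0])},
                {'wopr a-1': np.array([300.0])}]
    datavar = [{'wopr a-1': np.array([25.0])},
               {'wopr a-1': np.array([30.0])}]
    keys = {'obsname': 'days', 'assimindex': [[0, 1]]}  # simultaneous assimilation
    ini_state = {'permx': np.random.randn(100, ne)}     # prior ensemble, shape (n_param, ne)

    qaqc = QAQC(keys, obs_data, datavar, ini_state=ini_state)

    # after a forecast run, hand over the predicted ensemble (and current state)
    pred_data = [{'wopr a-1': 250.0 + 10.0 * np.random.randn(ne)} for _ in obs_data]
    qaqc.set(pred_data, state=ini_state)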
QAQC(keys, obs_data, datavar, logger=None, prior_info=None, sim=None, ini_state=None)
keys
obs_data
datavar
prior_info
sim
ini_state
ne
data_types
en_obs
en_obs_vec
en_time
en_time_vec
folder
pred_data
state
en_fcst
en_fcst_vec
lam
def set(self, pred_data, state=None, lam=None):
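Set the predicted data, the current state and, optionally, lam; the per-type forecast ensembles en_fcst / en_fcst_vec are rebuilt from pred_data for each data type.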
def calc_coverage(self, line=None):

Calculate the data coverage for production and seismic data. For seismic data, the plotting is based on the importance-scaled coverage developed by Espen O. Lie from GeoCore.

Parameters
  • line (array-like, optional): If not None, plot 1D coverage.
Notes
  • Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
  • NOTE: Not available in current version of PIPT
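The method body is not shipped with this version of PIPT. As an illustration only, a minimal sketch of a simple coverage check for scalar (production) data, using the en_obs / en_fcst arrays built by the class (the helper name is hypothetical, not the 4DSEIS implementation):

    import numpy as np

    def simple_coverage(en_obs, en_fcst):
        """Fraction of observations lying inside the forecast ensemble spread.

        en_obs  : (n_data, 1) observed values
        en_fcst : (n_data, ne) forecast ensemble
        """
        lo = en_fcst.min(axis=1, keepdims=True)
        hi = en_fcst.max(axis=1, keepdims=True)
        return ((en_obs >= lo) & (en_obs <= hi)).mean()

    # e.g. simple_coverage(qaqc.en_obs['wopr a-1'], qaqc.en_fcst['wopr a-1'])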
def calc_kg(self, options=None):

Check/write individual gain for parameters. Note: the gain is formed as the ES gain with an identity Cd; this can be improved.

Visualization of many of these parameters is problem-specific. In reservoir simulation cases it is necessary to write them to the simulation grid, while other applications may call for different visualizations. Hence, the method also depends on a simulator-specific writer.

Parameters
  • options (dict): Settings for the Kalman gain computations.
    • 'num_store' : int, optional Number of elements to store. Default is 10.
    • 'unique_time' : bool, optional Calculate for each time instance. Default is False.
    • 'plot_all_kg' : bool, optional Plot all the Kalman gains for the field parameters. If False, plot the num_store. Default is False.
    • 'only_log' : bool, optional Only write to the logger; no plotting. Default is True.
    • 'auto_ada_loc' : bool, optional Use localization in computations. Default is True.
    • 'write_to_resinsight' : bool, optional Pipe results to ResInsight. Default is False. (Note: this requires that ResInsight is open on the computer.)
Notes
  • Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
  • NOTE: Not available in current version of PIPT
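The implementation itself is not included here. For reference, a minimal sketch of what "the ES gain with an identity Cd" amounts to, computed from ensemble anomalies (hypothetical helper, not the 4DSEIS code):

    import numpy as np

    def es_gain_identity_cd(state_ens, pred_ens):
        """Ensemble-smoother gain K = C_MD (C_DD + I)^-1 with identity data covariance.

        state_ens : (n_param, ne) parameter ensemble
        pred_ens  : (n_data, ne) predicted-data ensemble
        """
        ne = state_ens.shape[1]
        dm = state_ens - state_ens.mean(axis=1, keepdims=True)  # parameter anomalies
        dd = pred_ens - pred_ens.mean(axis=1, keepdims=True)    # data anomalies
        c_md = dm @ dd.T / (ne - 1)                             # cross-covariance
        c_dd = dd @ dd.T / (ne - 1)                             # data auto-covariance
        return c_md @ np.linalg.inv(c_dd + np.eye(c_dd.shape[0]))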
def calc_mahalanobis(self, combi_list=(1, None)):

Calculate the Mahalanobis distance as described in Oliver, D. S. (2020). Diagnosing reservoir model deficiency for model improvement. Journal of Petroleum Science and Engineering, 193(February). https://doi.org/10.1016/j.petrol.2020.107367

Parameters
  • combi_list (list): List of levels and possible combinations of datatypes. The list must be given as a tuple with pairs:
    • level : int Defines which level. Default is 1.
    • combi_typ : str Defines how data are combined. Default is no combination.
Notes
  • Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
  • NOTE: Not available in current version of PIPT
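Only the docstring ships with this version. A minimal sketch of a level-1 distance (observations against the forecast ensemble plus measurement noise), loosely in the spirit of the Oliver (2020) reference; the helper and its inputs are hypothetical:

    import numpy as np

    def mahalanobis_level1(d_obs, pred_ens, c_d):
        """Mahalanobis distance of the observations from the forecast ensemble.

        d_obs    : (n_data,) observed data
        pred_ens : (n_data, ne) predicted-data ensemble
        c_d      : (n_data, n_data) measurement-error covariance
        """
        ne = pred_ens.shape[1]
        dd = pred_ens - pred_ens.mean(axis=1, keepdims=True)
        c_dd = dd @ dd.T / (ne - 1)             # ensemble forecast covariance
        resid = d_obs - pred_ens.mean(axis=1)   # innovation
        return float(np.sqrt(resid @ np.linalg.solve(c_dd + c_d, resid)))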
def calc_da_stat(self, options=None):

Calculate statistics for the updated parameters. The percentage of parameters with updates larger than one, two and three standard deviations (calculated from the initial ensemble) is flagged.

Parameters
  • options (dict): Settings for statistics.
    • 'write_to_file' : bool, optional Whether to write results to a .grdecl file. Defaults to False.
Notes
  • Copyright (c) 2019-2022 NORCE, All Rights Reserved. 4DSEIS
  • NOTE: Not available in current version of PIPT
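The implementation is not included; a minimal sketch of the statistic described above (share of parameters whose mean update exceeds one, two and three prior standard deviations), with hypothetical names:

    import numpy as np

    def update_stat(ini_state, upd_state):
        """Percentage of parameters updated by more than 1, 2 and 3 prior std. devs."""
        stat = {}
        for name, prior in ini_state.items():              # prior: (n_param, ne)
            shift = np.abs(upd_state[name].mean(axis=1) - prior.mean(axis=1))
            sigma = prior.std(axis=1, ddof=1)
            stat[name] = {f'>{k} std': 100.0 * np.mean(shift > k * sigma)
                          for k in (1, 2, 3)}
        return stat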