Coverage for /Users/buh/.pyenv/versions/3.12.2/envs/es-testbed/lib/python3.12/site-packages/es_testbed/_base.py: 17%
142 statements
« prev ^ index » next coverage.py v7.4.4, created at 2024-08-30 20:56 -0600
« prev ^ index » next coverage.py v7.4.4, created at 2024-08-30 20:56 -0600
1"""Base TestBed Class"""
3import typing as t
4import logging
5from importlib import import_module
6from datetime import datetime, timezone
7from shutil import rmtree
8from es_testbed.exceptions import ResultNotExpected
9from es_testbed.defaults import NAMEMAPPER
10from es_testbed.helpers.es_api import delete, get
11from es_testbed.helpers.utils import prettystr, process_preset
12from es_testbed._plan import PlanBuilder
13from es_testbed.mgrs import (
14 ComponentMgr,
15 DataStreamMgr,
16 IlmMgr,
17 IndexMgr,
18 SnapshotMgr,
19 TemplateMgr,
20)
22if t.TYPE_CHECKING:
23 from elasticsearch8 import Elasticsearch
25logger = logging.getLogger('es_testbed.TestBed')
27# pylint: disable=R0902,R0913
29# Preset Import
30# This imports the preset directory which must include the following files:
31# - A plan YAML file.
32# - A buildlist YAML file.
33# - A functions.py file (the actual python code), which must contain a
34# function named doc_generator(). This function must accept all kwargs from
35# the buildlist's options
36# - A definitions.py file, which is a Python variable file that helps find
37# the path to the module, etc., as well as import the plan, the buildlist,
38# the mappings and settings, etc. This must at least include a get_plan()
39# function that returns a dictionary of a plan.
40# - A mappings.json file (contains the index mappings your docs need)
41# - A settings.json file (contains the index settings)
42#
43# Any other files can be included to help your doc_generator function, e.g.
44# Faker definitions and classes, etc. Once the preset module is imported,
45# relative imports should work.
class TestBed:
    """TestBed Class

    Builds an Elasticsearch test environment from an imported preset module
    (plan, buildlist, mappings, settings, doc generator), then manages the
    lifecycle of every entity it creates: ILM policies, component and index
    templates, snapshots, indices, and data_streams.
    """

    __test__ = False  # Without this, this appears to be test class because of the name

    def __init__(
        self,
        client: 'Elasticsearch' = None,
        builtin: t.Union[str, None] = None,
        path: t.Union[str, None] = None,
        ref: t.Union[str, None] = None,
        url: t.Union[str, None] = None,
        scenario: t.Union[str, None] = None,
    ):
        """
        :param client: The Elasticsearch client connection object
        :param builtin: Name of a built-in preset, if used
        :param path: Filesystem path to a preset directory, if used
        :param ref: Git reference for a remote preset, if used
        :param url: URL of a remote preset repository, if used
        :param scenario: Optional scenario name passed to the preset's get_plan()
        :raises ValueError: when no preset could be resolved from the arguments
        :raises ImportError: when the preset's definitions module is incomplete
        """
        #: The plan settings
        self.settings = None

        modpath, tmpdir = process_preset(builtin, path, ref, url)
        if modpath is None:
            msg = 'Must define a preset'
            logger.critical(msg)
            raise ValueError(msg)
        try:
            preset = import_module(f'{modpath}.definitions')
            self.settings = preset.get_plan(scenario)
        except ImportError:
            logger.critical('Preset settings incomplete or incorrect')
            raise  # Bare raise preserves the original traceback
        self.settings['modpath'] = modpath
        if scenario:
            self.settings['scenario'] = scenario
        if tmpdir:
            self.settings['tmpdir'] = tmpdir

        #: The Elasticsearch client object
        self.client = client
        #: The test plan
        self.plan = None

        # Set up for tracking
        #: The ILM entity manager
        self.ilmmgr = None
        #: The Component Template entity manager
        self.componentmgr = None
        #: The (index) Template entity manager
        self.templatemgr = None
        #: The Snapshot entity manager
        self.snapshotmgr = None
        #: The Index entity manager
        self.indexmgr = None
        #: The data_stream entity manager
        self.data_streammgr = None

        # At this point, we have an imported preset. If we need to tweak the plan, we
        # just overwrite the values in the plan. We have the ILM settings, the
        # index_buildlist, etc. We can update/change whatever we want right up until we
        # call .setup()

    def _erase(self, kind: str, lst: t.Sequence[str]) -> bool:
        """Delete every entity of ``kind`` named in ``lst``.

        BUGFIX: was annotated ``-> None`` even though it always returns a bool.

        :param kind: The entity kind (e.g. 'ilm', 'index', 'snapshot')
        :param lst: The entity names to delete
        :returns: True if everything was deleted (or nothing needed deleting),
            False otherwise.
        """
        if not lst:
            logger.debug('%s: nothing to delete.', kind)
            return True
        if kind == 'ilm':  # ILM policies can't be batch deleted
            results = [self._while(kind, name) for name in lst]
            return False not in results  # No False values == True
        return self._while(kind, ','.join(lst))

    def _fodder_generator(
        self,
    ) -> t.Generator[t.Tuple[str, t.Sequence[str]], None, None]:
        """Method to delete everything matching our pattern(s)

        Yields ``(kind, entities)`` pairs for every tracked entity kind.

        BUGFIX: the old annotation ``t.Generator[str, t.Sequence[str], None]``
        put the entity list in the *send* type slot; the generator actually
        yields tuples and is never sent values.
        """
        items = ['index', 'data_stream', 'snapshot', 'template', 'component', 'ilm']
        for i in items:
            if i == 'snapshot' and self.plan.repository is None:
                logger.debug('No repository, no snapshots.')
                continue
            pattern = f'*{self.plan.prefix}-{NAMEMAPPER[i]}-{self.plan.uniq}*'
            entities = get(self.client, i, pattern, repository=self.plan.repository)
            yield (i, entities)

    def _while(self, kind: str, item: str) -> bool:
        """Attempt to delete ``item`` of ``kind``, retrying up to 3 times.

        Retries happen only when :class:`ResultNotExpected` is raised; a clean
        return from ``delete`` (True or False) ends the loop immediately.

        :param kind: The entity kind
        :param item: The entity name (or comma-separated names)
        :returns: True if the deletion succeeded, False otherwise.
        """
        count = 1
        success = False
        exc = None
        while count < 4 and not success:
            try:
                success = delete(
                    self.client, kind, item, repository=self.plan.repository
                )
                break  # delete() returned without raising; stop retrying
            except ResultNotExpected as err:
                logger.debug('Tried deleting "%s" %s time(s)', item, count)
                exc = err
                count += 1
        if not success:
            logger.warning(
                'Failed to delete "%s" after %s tries. Final error: %s',
                item,
                count - 1,
                exc,
            )
        return success

    def get_ilm_polling(self) -> None:
        """
        Get current ILM polling settings and store them in
        ``self.plan.ilm_polling_interval`` so teardown can restore them.
        """
        logger.info('Storing current ILM polling settings, if any...')
        try:
            res = dict(self.client.cluster.get_settings())
            logger.debug('Cluster settings: %s', prettystr(res))
        except Exception as err:
            logger.critical('Unable to get persistent cluster settings')
            logger.critical('This could be permissions, or something larger.')
            logger.critical('Exception: %s', prettystr(err))
            logger.critical('Exiting.')
            raise  # Bare raise preserves the original traceback
        try:
            retval = res['persistent']['indices']['lifecycle']['poll_interval']
        except KeyError:
            logger.debug(
                'No setting for indices.lifecycle.poll_interval. Must be default'
            )
            retval = None  # Must be an actual value to go into a DotMap
        if retval == '1s':
            msg = (
                'ILM polling already set at 1s. A previous run most likely did not '
                'tear down properly. Resetting to null after this run'
            )
            logger.warning(msg)
            retval = None  # Must be an actual value to go into a DotMap
        self.plan.ilm_polling_interval = retval
        logger.info('Stored ILM Polling Interval: %s', retval)

    def ilm_polling(self, interval: t.Union[str, None] = None) -> t.Dict:
        """Return persistent cluster settings to speed up ILM polling during testing

        :param interval: The poll interval value (e.g. '1s'), or None for default
        :returns: A dict suitable for ``cluster.put_settings(persistent=...)``
        """
        return {'indices.lifecycle.poll_interval': interval}

    def setup(self) -> None:
        """Setup the instance"""
        start = datetime.now(timezone.utc)
        # If we build self.plan here, then we can modify settings before setup()
        self.plan = PlanBuilder(settings=self.settings).plan
        self.get_ilm_polling()
        # Build the fast-polling settings dict once rather than twice
        fast_polling = self.ilm_polling(interval='1s')
        logger.info('Setting: %s', fast_polling)
        self.client.cluster.put_settings(persistent=fast_polling)
        self.setup_entitymgrs()
        end = datetime.now(timezone.utc)
        logger.info('Testbed setup elapsed time: %s', (end - start).total_seconds())

    def setup_entitymgrs(self) -> None:
        """
        Setup each EntityMgr child class
        """
        kw = {'client': self.client, 'plan': self.plan}

        self.ilmmgr = IlmMgr(**kw)
        self.ilmmgr.setup()
        self.componentmgr = ComponentMgr(**kw)
        self.componentmgr.setup()
        self.templatemgr = TemplateMgr(**kw)
        self.templatemgr.setup()
        self.snapshotmgr = SnapshotMgr(**kw)
        self.snapshotmgr.setup()
        # plan.type selects exactly one of the two searchable-entity managers
        if self.plan.type == 'indices':
            self.indexmgr = IndexMgr(**kw, snapmgr=self.snapshotmgr)
            self.indexmgr.setup()
        if self.plan.type == 'data_stream':
            self.data_streammgr = DataStreamMgr(**kw, snapmgr=self.snapshotmgr)
            self.data_streammgr.setup()

    def teardown(self) -> None:
        """Tear down anything we created"""
        start = datetime.now(timezone.utc)
        successful = True
        if self.plan.tmpdir:
            logger.debug('Removing tmpdir: %s', self.plan.tmpdir)
            rmtree(self.plan.tmpdir)  # Remove the tmpdir stored here
        for kind, list_of_kind in self._fodder_generator():
            if not self._erase(kind, list_of_kind):
                successful = False
        persist = self.ilm_polling(interval=self.plan.ilm_polling_interval)
        logger.info(
            'Restoring ILM polling to previous value: %s',
            self.plan.ilm_polling_interval,
        )
        self.client.cluster.put_settings(persistent=persist)
        end = datetime.now(timezone.utc)
        logger.info('Testbed teardown elapsed time: %s', (end - start).total_seconds())
        if successful:
            logger.info('Cleanup successful')
        else:
            logger.error('Cleanup was unsuccessful/incomplete')
        self.plan.cleanup = successful