1"""Base TestBed Class"""
3import typing as t
4import logging
5from datetime import datetime, timezone
6from dotmap import DotMap
7from es_testbed.exceptions import ResultNotExpected
8from es_testbed.defaults import NAMEMAPPER
9from es_testbed.helpers.es_api import delete, get
10from es_testbed.helpers.utils import prettystr
11from es_testbed._plan import PlanBuilder
12from es_testbed.mgrs import (
13 ComponentMgr,
14 DataStreamMgr,
15 IlmMgr,
16 IndexMgr,
17 SnapshotMgr,
18 TemplateMgr,
19)
21if t.TYPE_CHECKING:
22 from elasticsearch8 import Elasticsearch
24logger = logging.getLogger('es_testbed.TestBed')
26# pylint: disable=R0902
29class TestBed:
30 """TestBed Class"""
32 __test__ = False

    def __init__(
        self,
        client: 'Elasticsearch' = None,
        plan: t.Union[DotMap, PlanBuilder, t.Dict, None] = None,
    ):
        self.client = client
        if plan is None:
            raise ValueError('Must provide a plan')
        if isinstance(plan, PlanBuilder):
            logger.debug('The plan is already PlanBuilder type.')
            self.plan = plan.plan
        elif isinstance(plan, dict):
            logger.debug('The plan is a dict type.')
            _ = PlanBuilder(settings=plan)
            self.plan = _.plan
        else:
            raise ValueError('plan must be a PlanBuilder or settings dict')

        # Set up for tracking
        self.ilmmgr = None
        self.componentmgr = None
        self.templatemgr = None
        self.snapshotmgr = None
        self.indexmgr = None
        self.data_streammgr = None

    def _erase(self, kind: str, lst: t.Sequence[str]) -> bool:
        """Delete the listed entities of the given kind. Return overall success"""
        overall_success = True
        if not lst:
            logger.debug('%s: nothing to delete.', kind)
            return True
        if kind == 'ilm':  # ILM policies can't be batch deleted
            ilm = [self._while(kind, x) for x in lst]
            overall_success = False not in ilm  # No False values == True
        else:
            overall_success = self._while(kind, ','.join(lst))
        return overall_success
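
    # Example of the dispatch above (names are hypothetical): for kind='index' and
    # lst=['idx-1', 'idx-2'], _while() is called once with 'idx-1,idx-2'; for
    # kind='ilm', _while() is called once per policy name, since ILM policies
    # can't be batch deleted.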

    def _fodder_generator(
        self,
    ) -> t.Generator[t.Tuple[str, t.Sequence[str]], None, None]:
        """Yield (kind, entities) pairs matching our pattern(s), for teardown to delete"""
        items = ['index', 'data_stream', 'snapshot', 'template', 'component', 'ilm']
        for i in items:
            if i == 'snapshot' and self.plan.repository is None:
                logger.debug('No repository, no snapshots.')
                continue
            pattern = f'*{self.plan.prefix}-{NAMEMAPPER[i]}-{self.plan.uniq}*'
            entities = get(self.client, i, pattern, repository=self.plan.repository)
            yield (i, entities)
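
    # For reference, the pattern above interpolates the plan's prefix and uniq values
    # around a per-kind short name from NAMEMAPPER. With hypothetical values
    # plan.prefix='es-testbed', NAMEMAPPER['index']='idx', and plan.uniq='abc123',
    # the resulting search pattern would be '*es-testbed-idx-abc123*'.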

    def _while(self, kind: str, item: str) -> bool:
        """Try up to 3 times to delete the named item(s) of the given kind"""
        count = 1
        success = False
        exc = None
        while count < 4 and not success:
            try:
                success = delete(
                    self.client, kind, item, repository=self.plan.repository
                )
                break
            except ResultNotExpected as err:
                logger.debug('Tried deleting "%s" %s time(s)', item, count)
                exc = err
                count += 1
        if not success:
            logger.warning(
                'Failed to delete "%s" after %s tries. Final error: %s',
                item,
                count - 1,
                exc,
            )
        return success
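
    # Note: only ResultNotExpected triggers a retry here; the first delete() call
    # that returns without raising ends the loop, whatever its return value.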

    def get_ilm_polling(self) -> None:
        """
        Get the current ILM polling setting and store it in
        self.plan.ilm_polling_interval
        """
        logger.info('Storing current ILM polling settings, if any...')
        try:
            res = dict(self.client.cluster.get_settings())
            logger.debug('Cluster settings: %s', prettystr(res))
        except Exception as err:
            logger.critical('Unable to get persistent cluster settings')
            logger.critical('This could be permissions, or something larger.')
            logger.critical('Exception: %s', prettystr(err))
            logger.critical('Exiting.')
            raise err
        try:
            retval = res['persistent']['indices']['lifecycle']['poll_interval']
        except KeyError:
            logger.debug(
                'No setting for indices.lifecycle.poll_interval. Must be default'
            )
            retval = None  # Must be an actual value to go into a DotMap
        if retval == '1s':
            msg = (
                'ILM polling already set at 1s. A previous run most likely did not '
                'tear down properly. Resetting to null after this run'
            )
            logger.warning(msg)
            retval = None  # Must be an actual value to go into a DotMap
        self.plan.ilm_polling_interval = retval
        logger.info('Stored ILM Polling Interval: %s', retval)
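
    # For reference, the nested lookup above expects a cluster get_settings()
    # response shaped roughly like this when a persistent poll interval is set
    # (illustrative value):
    #
    #   {'persistent': {'indices': {'lifecycle': {'poll_interval': '10m'}}},
    #    'transient': {}}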

    def ilm_polling(self, interval: t.Union[str, None] = None) -> t.Dict:
        """Return persistent cluster settings to speed up ILM polling during testing"""
        return {'indices.lifecycle.poll_interval': interval}

    def setup(self) -> None:
        """Set up the instance"""
        start = datetime.now(timezone.utc)
        self.get_ilm_polling()
        logger.info('Setting: %s', self.ilm_polling(interval='1s'))
        self.client.cluster.put_settings(persistent=self.ilm_polling(interval='1s'))
        self.setup_entitymgrs()
        end = datetime.now(timezone.utc)
        logger.info('Testbed setup elapsed time: %s', (end - start).total_seconds())

    def setup_entitymgrs(self) -> None:
        """
        Set up each EntityMgr child class
        """
        kw = {'client': self.client, 'plan': self.plan}
        self.ilmmgr = IlmMgr(**kw)
        self.ilmmgr.setup()
        self.componentmgr = ComponentMgr(**kw)
        self.componentmgr.setup()
        self.templatemgr = TemplateMgr(**kw)
        self.templatemgr.setup()
        self.snapshotmgr = SnapshotMgr(**kw)
        self.snapshotmgr.setup()
        if self.plan.type == 'indices':
            self.indexmgr = IndexMgr(**kw, snapmgr=self.snapshotmgr)
            self.indexmgr.setup()
        if self.plan.type == 'data_stream':
            self.data_streammgr = DataStreamMgr(**kw, snapmgr=self.snapshotmgr)
            self.data_streammgr.setup()

    def teardown(self) -> None:
        """Tear down anything we created"""
        start = datetime.now(timezone.utc)
        successful = True
        for kind, list_of_kind in self._fodder_generator():
            if not self._erase(kind, list_of_kind):
                successful = False
        persist = self.ilm_polling(interval=self.plan.ilm_polling_interval)
        logger.info(
            'Restoring ILM polling to previous value: %s',
            self.plan.ilm_polling_interval,
        )
        self.client.cluster.put_settings(persistent=persist)
        end = datetime.now(timezone.utc)
        logger.info('Testbed teardown elapsed time: %s', (end - start).total_seconds())
        if successful:
            logger.info('Cleanup successful')
        else:
            logger.error('Cleanup was unsuccessful/incomplete')
        self.plan.cleanup = successful
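
# A minimal usage sketch, assuming a reachable cluster and a PlanBuilder-compatible
# settings dict (the plan keys shown are assumptions; see PlanBuilder for the real
# schema):
#
#   from elasticsearch8 import Elasticsearch
#
#   client = Elasticsearch('https://127.0.0.1:9200')
#   tb = TestBed(client=client, plan={'type': 'indices', 'repository': 'my_repo'})
#   tb.setup()     # store ILM polling, speed it to 1s, and build all test entities
#   ...            # exercise the created indices / data_stream here
#   tb.teardown()  # delete everything created and restore the prior ILM polling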