Coverage for /Users/buh/.pyenv/versions/3.12.2/envs/es-testbed/lib/python3.12/site-packages/es_testbed/_base.py: 80%

142 statements  

« prev     ^ index     » next       coverage.py v7.4.4, created at 2024-08-21 12:05 -0600

1"""Base TestBed Class""" 

2 

3import typing as t 

4import logging 

5from importlib import import_module 

6from datetime import datetime, timezone 

7from shutil import rmtree 

8from es_testbed.exceptions import ResultNotExpected 

9from es_testbed.defaults import NAMEMAPPER 

10from es_testbed.helpers.es_api import delete, get 

11from es_testbed.helpers.utils import prettystr, process_preset 

12from es_testbed._plan import PlanBuilder 

13from es_testbed.mgrs import ( 

14 ComponentMgr, 

15 DataStreamMgr, 

16 IlmMgr, 

17 IndexMgr, 

18 SnapshotMgr, 

19 TemplateMgr, 

20) 

21 

# Elasticsearch is imported only while type checking (t.TYPE_CHECKING is
# False at runtime); the client type appears in annotations as a string.
if t.TYPE_CHECKING:
    from elasticsearch8 import Elasticsearch

# Module-level logger, namespaced under the es_testbed package
logger = logging.getLogger('es_testbed.TestBed')

27# pylint: disable=R0902,R0913 

28 

29# Preset Import 

30# This imports the preset directory which must include the following files: 

31# - A plan YAML file. 

32# - A buildlist YAML file. 

33# - A functions.py file (the actual python code), which must contain a 

34# function named doc_generator(). This function must accept all kwargs from 

35# the buildlist's options 

36# - A definitions.py file, which is a Python variable file that helps find 

37# the path to the module, etc., as well as import the plan, the buildlist, 

38# the mappings and settings, etc. This must at least include a get_plan() 

39# function that returns a dictionary of a plan. 

40# - A mappings.json file (contains the index mappings your docs need) 

41# - A settings.json file (contains the index settings) 

42# 

43# Any other files can be included to help your doc_generator function, e.g. 

44# Faker definitions and classes, etc. Once the preset module is imported, 

45# relative imports should work. 

46 

47 

class TestBed:
    """TestBed Class

    Imports a preset module, builds a test plan from its settings, and
    manages the full lifecycle (:py:meth:`setup` / :py:meth:`teardown`) of
    every Elasticsearch entity the plan creates: ILM policies, component
    and index templates, snapshots, indices, and data_streams.
    """

    __test__ = False  # Without this, this appears to be test class because of the name

    def __init__(
        self,
        client: 'Elasticsearch' = None,
        builtin: t.Union[str, None] = None,
        path: t.Union[str, None] = None,
        ref: t.Union[str, None] = None,
        url: t.Union[str, None] = None,
        scenario: t.Union[str, None] = None,
    ):
        """
        :param client: The Elasticsearch client connection object
        :param builtin: Name of a built-in preset, if any
        :param path: Local filesystem path to a preset, if any
        :param ref: Git reference (branch/tag) for a remote preset, if any
        :param url: Git URL for a remote preset, if any
        :param scenario: Scenario name passed to the preset's get_plan()
        :raises ValueError: If no preset module path could be resolved
        :raises ImportError: If the preset's definitions module is
            incomplete or incorrect
        """
        #: The plan settings
        self.settings = None

        modpath, tmpdir = process_preset(builtin, path, ref, url)
        if modpath is None:
            msg = 'Must define a preset'
            logger.critical(msg)
            raise ValueError(msg)

        try:
            # The preset's definitions module must provide get_plan()
            preset = import_module(f'{modpath}.definitions')
            self.settings = preset.get_plan(scenario)
        except ImportError:
            logger.critical('Preset settings incomplete or incorrect')
            raise  # bare raise preserves the original traceback

        self.settings['modpath'] = modpath
        if scenario:
            self.settings['scenario'] = scenario
        if tmpdir:
            # Remote presets are cloned to a tmpdir; remembered for teardown
            self.settings['tmpdir'] = tmpdir

        #: The Elasticsearch client object
        self.client = client
        #: The test plan
        self.plan = None

        # Set up for tracking
        #: The ILM entity manager
        self.ilmmgr = None
        #: The Component Template entity manager
        self.componentmgr = None
        #: The (index) Template entity manager
        self.templatemgr = None
        #: The Snapshot entity manager
        self.snapshotmgr = None
        #: The Index entity manager
        self.indexmgr = None
        #: The data_stream entity manager
        self.data_streammgr = None

        # At this point, we have an imported preset. If we need to tweak the plan, we
        # just overwrite the values in the plan. We have the ILM settings, the
        # index_buildlist, etc. We can update/change whatever we want right up until we
        # call .setup()

    def _erase(self, kind: str, lst: t.Sequence[str]) -> bool:
        """Delete all listed entities of the given kind.

        :param kind: The entity kind (index, data_stream, snapshot, etc.)
        :param lst: The entity names to delete
        :returns: ``True`` if every deletion succeeded (or nothing to do)
        """
        if not lst:
            logger.debug('%s: nothing to delete.', kind)
            return True
        if kind == 'ilm':  # ILM policies can't be batch deleted
            results = [self._while(kind, policy) for policy in lst]
            return False not in results  # No False values == True
        # Everything else accepts a comma-separated batch
        return self._while(kind, ','.join(lst))

    def _fodder_generator(
        self,
    ) -> t.Generator[t.Tuple[str, t.Sequence[str]], None, None]:
        """Yield (kind, entities) pairs for everything matching our pattern(s)

        :returns: A generator of ``(kind, matching entity names)`` tuples,
            one per entity kind tracked by the plan
        """
        items = ['index', 'data_stream', 'snapshot', 'template', 'component', 'ilm']
        for i in items:
            if i == 'snapshot' and self.plan.repository is None:
                logger.debug('No repository, no snapshots.')
                continue
            pattern = f'*{self.plan.prefix}-{NAMEMAPPER[i]}-{self.plan.uniq}*'
            entities = get(self.client, i, pattern, repository=self.plan.repository)
            yield (i, entities)

    def _while(self, kind: str, item: str) -> bool:
        """Attempt to delete ``item``, retrying up to 3 times.

        Retries occur only when :py:exc:`ResultNotExpected` is raised; a
        non-exception result (success or failure) ends the loop immediately.

        :param kind: The entity kind
        :param item: The entity name (or comma-separated names)
        :returns: ``True`` if the deletion succeeded
        """
        count = 1
        success = False
        exc = None
        while count < 4 and not success:
            try:
                success = delete(
                    self.client, kind, item, repository=self.plan.repository
                )
                break  # delete() returned without raising; do not retry
            except ResultNotExpected as err:
                logger.debug('Tried deleting "%s" %s time(s)', item, count)
                exc = err
                count += 1
        if not success:
            logger.warning(
                'Failed to delete "%s" after %s tries. Final error: %s',
                item,
                count - 1,
                exc,
            )
        return success

    def get_ilm_polling(self) -> None:
        """
        Get current ILM polling settings and store them in self.plan.polling_interval

        :raises Exception: Re-raises anything from cluster.get_settings()
            after logging (could be permissions, or something larger)
        """
        logger.info('Storing current ILM polling settings, if any...')
        try:
            res = dict(self.client.cluster.get_settings())
            logger.debug('Cluster settings: %s', prettystr(res))
        except Exception as err:
            logger.critical('Unable to get persistent cluster settings')
            logger.critical('This could be permissions, or something larger.')
            logger.critical('Exception: %s', prettystr(err))
            logger.critical('Exiting.')
            raise err
        try:
            retval = res['persistent']['indices']['lifecycle']['poll_interval']
        except KeyError:
            logger.debug(
                'No setting for indices.lifecycle.poll_interval. Must be default'
            )
            retval = None  # Must be an actual value to go into a DotMap
        if retval == '1s':
            # setup() sets 1s; finding it already set implies a prior run's
            # teardown never restored the original value
            msg = (
                'ILM polling already set at 1s. A previous run most likely did not '
                'tear down properly. Resetting to null after this run'
            )
            logger.warning(msg)
            retval = None  # Must be an actual value to go into a DotMap
        self.plan.ilm_polling_interval = retval
        logger.info('Stored ILM Polling Interval: %s', retval)

    def ilm_polling(self, interval: t.Union[str, None] = None) -> t.Dict:
        """Return persistent cluster settings to speed up ILM polling during testing

        :param interval: The polling interval (e.g. ``'1s'``); ``None``
            resets the setting to the cluster default
        :returns: A settings dict suitable for cluster.put_settings()
        """
        return {'indices.lifecycle.poll_interval': interval}

    def setup(self) -> None:
        """Setup the instance

        Builds the plan from settings, records the current ILM polling
        interval, speeds polling up to 1s, and creates all entity managers.
        """
        start = datetime.now(timezone.utc)
        # If we build self.plan here, then we can modify settings before setup()
        self.plan = PlanBuilder(settings=self.settings).plan
        self.get_ilm_polling()
        logger.info('Setting: %s', self.ilm_polling(interval='1s'))
        self.client.cluster.put_settings(persistent=self.ilm_polling(interval='1s'))
        self.setup_entitymgrs()
        end = datetime.now(timezone.utc)
        logger.info('Testbed setup elapsed time: %s', (end - start).total_seconds())

    def setup_entitymgrs(self) -> None:
        """
        Setup each EntityMgr child class

        Order matters: ILM policies, component templates, and index
        templates must exist before indices or data_streams are created.
        """
        kw = {'client': self.client, 'plan': self.plan}
        self.ilmmgr = IlmMgr(**kw)
        self.ilmmgr.setup()
        self.componentmgr = ComponentMgr(**kw)
        self.componentmgr.setup()
        self.templatemgr = TemplateMgr(**kw)
        self.templatemgr.setup()
        self.snapshotmgr = SnapshotMgr(**kw)
        self.snapshotmgr.setup()
        if self.plan.type == 'indices':
            self.indexmgr = IndexMgr(**kw, snapmgr=self.snapshotmgr)
            self.indexmgr.setup()
        if self.plan.type == 'data_stream':
            self.data_streammgr = DataStreamMgr(**kw, snapmgr=self.snapshotmgr)
            self.data_streammgr.setup()

    def teardown(self) -> None:
        """Tear down anything we created

        Removes any preset tmpdir, deletes every entity matching the plan's
        naming pattern, and restores the original ILM polling interval.
        Records overall success in ``self.plan.cleanup``.
        """
        start = datetime.now(timezone.utc)
        successful = True
        if self.plan.tmpdir:
            logger.debug('Removing tmpdir: %s', self.plan.tmpdir)
            rmtree(self.plan.tmpdir)  # Remove the tmpdir stored here
        for kind, list_of_kind in self._fodder_generator():
            if not self._erase(kind, list_of_kind):
                successful = False
        persist = self.ilm_polling(interval=self.plan.ilm_polling_interval)
        logger.info(
            'Restoring ILM polling to previous value: %s',
            self.plan.ilm_polling_interval,
        )
        self.client.cluster.put_settings(persistent=persist)
        end = datetime.now(timezone.utc)
        logger.info('Testbed teardown elapsed time: %s', (end - start).total_seconds())
        if successful:
            logger.info('Cleanup successful')
        else:
            logger.error('Cleanup was unsuccessful/incomplete')
        self.plan.cleanup = successful