# (c) 2024 Martin Wendt; see https://github.com/mar10/benchman
# Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php

import datetime
import json
import platform
import pprint
import subprocess
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import Any

from benchman import BenchmarkManager
from benchman.benchman import get_benchmark_filepath
from benchman.dataset import Dataset
from benchman.reporter import TablibReporter
from benchman.util import (
    BenchmarkSuiteFile,
    extract_items,
    json_dump,
    logger,
    split_tokens,
)


def handle_combine_command(parser: ArgumentParser, args: Namespace) -> int:
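    """Combine all `*.bmr.json` result files in the benchmark folder into one file.

    Files whose common context (client, hardware, system, ...) differs from the
    first file's are skipped with a warning. Unless `--no-purge` is given, the
    source files are deleted after a successful combine.
    """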
    bm = BenchmarkManager.singleton()
    data: list[dict[str, Any]] = []
    combined: dict[str, Any] = {
        "combine_date": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
        "context": None,
        "data": data,
    }
    #: The keys that must be common to all benchmark files
    common_context_keys = [
        "client",
        "debug_mode",
        "hardware",
        "project",
        "system",
        "tag",
    ]
    combined_files: list[Path] = []
    first_context: dict[str, Any] | None = None
    errors = 0

    for p in bm.folder.glob("*.bmr.json"):
        with p.open("r") as f:
            benchmark_data = json.load(f)

        # Extract the common context items from the dictionary
        common_context = extract_items(benchmark_data, common_context_keys, remove=True)

        if first_context is None:
            first_context = common_context
            combined["context"] = common_context
            combined["tag"] = bm.tag
        else:
            # Check that the context matches the first file's
            mismatch = []
            for k, v in common_context.items():
                if v != first_context.get(k):
                    mismatch.append((k, v, first_context.get(k)))

            if mismatch:
                errors += 1
                logger.warning(
                    "Found benchmark with different context information:\n"
                    "    " + pprint.pformat(mismatch) + "\n"
                    "    Skipping file: " + str(p)
                )
                continue

        # We have now read a benchmark file whose context matches the
        # first one's.
        data.append(benchmark_data)
        combined_files.append(p)

    if len(data) < 1:
        logger.warning(f"No benchmark files found in {bm.folder}")
        return 2

    target_path = get_benchmark_filepath(args.tag)
    with target_path.open("w") as f:
        json_dump(combined, f, pretty=True)

    if not args.no_purge and not errors:
        for p in combined_files:
            logger.debug(f"Delete {p}")
            p.unlink()

    logger.info(f"Combined {len(data)} benchmark files into {target_path}")
    return 3 if errors else 0


def handle_info_command(parser: ArgumentParser, args: Namespace) -> int:
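    """Print information about the project, the current host, and known tags."""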
    bm = BenchmarkManager.singleton()
    uname = platform.uname()

    files = BenchmarkSuiteFile.find_files(bm.folder)
    tags = sorted({f.tag for f in files})

    logger.info("Benchman (https://github.com/mar10/benchman):")
    logger.info(f"  Project   : {bm.project_name} v{bm.project_version}")
    logger.info(f"  Folder    : {bm.folder}")
    logger.info(f"  Node      : {uname.node}")
    logger.info(f"  System    : {uname.system} {uname.release}")
    logger.info(f"  Machine   : {uname.machine}")
    logger.info(f"  Client ID : {bm.context.client_slug()}")
    logger.info(f'  Tags      : {", ".join(tags)}')

    if args.list:
        logger.info(f"{len(files)} benchmark suites:")
        for f in files:
            logger.info(f"    {f.name}")
    return 0


def handle_run_command(parser: ArgumentParser, args: Namespace) -> int:
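    """Run benchmarks (not yet implemented)."""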
    raise NotImplementedError("Not implemented yet.")


def handle_purge_command(parser: ArgumentParser, args: Namespace) -> int:
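    """Delete all uncombined temporary `*.bmr.json` files from the benchmark folder."""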
    bm = BenchmarkManager.singleton()
    orphans = list(bm.folder.glob("*.bmr.json"))
    for p in orphans:
        logger.debug(f"Delete {p}")
        p.unlink()
    logger.info(f"Deleted {len(orphans)} uncombined temporary benchmark files.")
    return 0


def handle_report_command(parser: ArgumentParser, args: Namespace) -> int:
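    """Load a benchmark suite file and render it as a table in the requested format."""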
    path = get_benchmark_filepath(args.input)
    bm = BenchmarkManager.load(path)

    ds = Dataset(
        name=args.name,
        bm=bm,
        cols=split_tokens(args.columns),
        dyn_col_name_attr=args.dyn_col_name,
        dyn_col_value_attr=args.dyn_col_value,
        filter=args.filter,
        sort_cols=args.sort,
    )

    r = TablibReporter(ds)
    r.report(format=args.format, out=args.output)
    return 0


def handle_tag_command(parser: ArgumentParser, args: Namespace) -> int:
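    """Save a benchmark suite file under a new tag name, optionally adding it to git."""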
    source_path = get_benchmark_filepath(args.source, must_exist=True)
    target_path = get_benchmark_filepath(args.name, must_exist=False)

    if target_path.exists() and not args.force:
        logger.error(f"Target file already exists: {target_path}")
        logger.info("Hint: pass --force to overwrite.")
        return 1

    bm_file = BenchmarkSuiteFile(source_path)
    # Pass --force through, so an existing target may be overwritten
    bm_file.save_tag(args.name, replace=args.force, keep_time=args.keep_time)

    if args.git_add:
        logger.info(f"git add {target_path}")

        result = subprocess.run(
            ["git", "add", "-v", "-f", target_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        if result.returncode != 0:
            logger.error(f"Error running `git add -f`: {result.stdout.decode()}")
            return 1
        logger.info(f"File '{target_path}' added to git index.")
    return 0