import argparse
import datetime
import distutils.spawn
import logging
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import threading
import xml.dom.minidom
from collections import deque

def positive_integer(value):
    # Validator used by the -j option: accept only strictly positive integers.
    pos_int = int(value)
    if pos_int <= 0:
        raise argparse.ArgumentTypeError("%s must be a positive integer" % value)
    return pos_int
# argparse action for -j: the first parsed value is stored as-is, later values keep the minimum.
class StoreOrUpdateMin(argparse.Action):
    first_parsed = True

    def __call__(self, parser, namespace, values, option_string=None):
        if self.first_parsed:
            self.first_parsed = False
            setattr(namespace, self.dest, values)
        else:
            setattr(namespace, self.dest, min(namespace.j, values))
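# Return the set of PIDs of the direct children of parent_pid, parsed from ps output.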
def GetChildren(parent_pid):
    ret = set()
    ps_command = subprocess.Popen("ps -o pid --ppid %d --noheaders" % parent_pid, shell=True, stdout=subprocess.PIPE)
    ps_output = ps_command.stdout.read()
    ps_command.wait()
    for pid_str in ps_output.split("\n")[:-1]:
        ret.add(int(pid_str))
    return ret
# Kill helper (function name assumed from context): terminate a process and, recursively, its children.
def kill_proc_tree(pid):
    children = GetChildren(pid)
    os.kill(pid, signal.SIGKILL)
    for child in children:
        kill_proc_tree(child)
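# Worker body run by each job thread: it pops one line at a time from the named list and runs the tool on it.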
def execute_tests(named_list, thread_index):
    global passed_benchmark
    global total_benchmark
    global line_index
    global failure
    lines = open(named_list).readlines()
    with lock:
        local_index = line_index
        line_index = line_index + 1
    while local_index < len(lines) and not (failure and args.stop):
        cwd = ComputeDirectory(lines[local_index])
        failed_output_file_name = os.path.join(cwd, args.tool + "_failed_output")
        if os.path.exists(failed_output_file_name):
            os.remove(failed_output_file_name)
        tool_return_value_file_name = os.path.join(cwd, args.tool + "_return_value")
        if args.restart and os.path.exists(os.path.join(cwd, args.tool + "_return_value")):
            tool_return_value_file = open(tool_return_value_file_name, "r")
            return_value = tool_return_value_file.read()
            tool_return_value_file.close()
            if return_value == "0":
                with lock:
                    total_benchmark += 1
                    passed_benchmark += 1
                    logging.info(" SKIPPING --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
                with lock:
                    local_index = line_index
                    line_index = line_index + 1
                continue
        HLS_output_directory = os.path.join(cwd, "HLS_output")
        if os.path.exists(HLS_output_directory):
            shutil.rmtree(HLS_output_directory)
        output_file_name = os.path.join(cwd, args.tool + "_execution_output")
        output_file = open(output_file_name, "w")
        local_args = lines[local_index]
        if local_args[0] == "\"":
            local_args = local_args[1:-1]
        if args.tool != "bambu" and args.tool != "zebu":
            tokens = shlex.split(lines[local_index])
            args_without_benchmark_name = ""
            for token in tokens:
                if token.find("--benchmark-name") == -1:
                    args_without_benchmark_name += token + " "
            local_args = args_without_benchmark_name
        local_command = "ulimit " + args.ulimit + "; exec timeout " + args.timeout + " " + tool_exe
        local_command = local_command + " " + local_args
        output_file.write("#" * 80 + "\n")
        output_file.write("cd " + cwd + "; ")
        output_file.write(local_command + "\n")
        output_file.write("#" * 80 + "\n")
        # Launch the tool under the creation/destruction lock so that a global stop can still kill it.
        with lock_creation_destruction:
            if not (failure and args.stop):
                children[thread_index] = subprocess.Popen(local_command, stderr=output_file, stdout=output_file, cwd=cwd, shell=True, executable="/bin/bash")
        return_value = children[thread_index].wait()
        with lock_creation_destruction:
            if return_value != 0 and (args.stop or args.returnfail):
                failure = True
            if failure and args.stop:
                for local_thread_index in range(n_jobs):
                    if children[local_thread_index] != None:
                        if children[local_thread_index].poll() == None:
                            try:
                                kill_proc_tree(children[local_thread_index].pid)
                            except OSError:
                                pass
        os.fsync(output_file.fileno())
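        # Persist the return value and the command line used for this run.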
        tool_return_value_file = open(tool_return_value_file_name, "w")
        tool_return_value_file.write(str(return_value))
        tool_return_value_file.close()
        args_file = open(os.path.join(cwd, "args"), "w")
        args_file.write(lines[local_index])
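        # On success, summarize cycles and clock slack from <tool>_results_0.xml.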
        if return_value == 0 and os.path.exists(os.path.join(cwd, args.tool + "_results_0.xml")):
            tool_results_file_name = os.path.join(cwd, args.tool + "_results")
            tool_results_file = open(tool_results_file_name, "w")
            tool_results_string = ""
            xml_document = xml.dom.minidom.parse(os.path.join(cwd, args.tool + "_results_0.xml"))
            if len(xml_document.getElementsByTagName("CYCLES")) > 0:
                cycles_tag = xml_document.getElementsByTagName("CYCLES")[0]
                tool_results_string = tool_results_string + cycles_tag.attributes["value"].value + " CYCLES"
            if len(xml_document.getElementsByTagName("CLOCK_SLACK")) > 0:
                slack_tag = xml_document.getElementsByTagName("CLOCK_SLACK")[0]
                tool_results_string = tool_results_string + " *** " + slack_tag.attributes["value"].value + "ns"
            tool_results_file.write(tool_results_string)
            tool_results_file.close()
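        # Update the global counters and log the outcome of this benchmark.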
        if not (failure and args.stop) or (return_value != -9 and return_value != 0):
            if return_value != 0:
                shutil.copy(output_file_name, str(os.path.join(os.path.dirname(output_file_name), args.tool + "_failed_output")))
            with lock:
                total_benchmark = total_benchmark + 1
                if return_value == 0:
                    passed_benchmark += 1
                    if not args.no_clean:
                        for sub in os.listdir(cwd):
                            if os.path.isdir(os.path.join(cwd, sub)):
                                shutil.rmtree(os.path.join(cwd, sub))
                            else:
                                if sub != args.tool + "_return_value" and sub != args.tool + "_execution_output" and sub != args.tool + "_results_0.xml" and sub != "args":
                                    os.remove(os.path.join(cwd, sub))
                    if os.path.exists(os.path.join(cwd, args.tool + "_results_0.xml")):
                        logging.info(" SUCCESS (" + tool_results_string + ") --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
                    else:
                        logging.info(" SUCCESS --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
                elif return_value == 124:
                    logging.info(" FAILURE (Timeout) --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
                elif return_value == 153:
                    logging.info(" FAILURE (File size limit exceeded) --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
                else:
                    logging.info(" FAILURE --- OVERALL: " + str(passed_benchmark) + " passed, " + str(total_benchmark-passed_benchmark) + " failed, " + str(len(lines)-total_benchmark) + " queued --- " + lines[local_index].replace("\\", ""))
        with lock:
            local_index = line_index
            line_index = line_index + 1
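# Map a benchmark list line to its output directory (<output>/<configuration-name>/<benchmark-name>).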
def ComputeDirectory(line):
    configuration_name = ""
    benchmark_name = ""
    tokens = shlex.split(line)
    for token in tokens:
        if token.find("--configuration-name") != -1:
            configuration_name = token[len("--configuration-name="):]
        if token.find("--benchmark-name") != -1:
            benchmark_name = token[len("--benchmark-name="):]
    new_dir = os.path.join(abs_path, configuration_name, benchmark_name)
    return new_dir
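# Recursively collect the C/C++ source files contained in a directory.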
def SearchCFiles(directory):
    logging.info(" Looking for file in " + str(directory))
    files = set()
    for element in os.listdir(directory):
        if os.path.isdir(os.path.join(directory, element)):
            files = files.union(SearchCFiles(os.path.join(directory, element)))
        elif (element[-2:] == ".c") or (element[-2:] == ".C") or (element[-4:] == ".CPP") or (element[-4:] == ".cpp") or (element[-4:] == ".cxx") or (element[-3:] == ".cc") or (element[-4:] == ".c++"):
            files.add(os.path.join(directory, element))
    return files
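# Aggregate per-benchmark outcomes of a directory tree into <tool>_failed_output and report files.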
def CollectResults(directory):
    # Skip if this directory contains the output of a single test or is empty
    if os.path.exists(os.path.join(directory, args.tool + "_return_value")) or os.listdir(directory) == []:
        return
    subdirs = [s for s in sorted(os.listdir(directory)) if os.path.isdir(os.path.join(directory, s)) and s != "panda-temp" and s != "HLS_output"]
    for subdir in subdirs:
        CollectResults(os.path.join(directory, subdir))
    tool_failed_output = open(os.path.join(directory, args.tool + "_failed_output"), "w")
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_failed_output")):
            tool_failed_output.write(open(os.path.join(directory, subdir, args.tool + "_failed_output")).read())
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_execution_output")):
            tool_failed_output.write("\n")
            tool_failed_output.write("\n")
            tool_failed_output.write("\n")
    tool_failed_output.close()
    report_file = open(os.path.join(directory, "report"), "w")
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_return_value")):
            return_value_file_name = os.path.join(directory, subdir, args.tool + "_return_value")
            return_value_file = open(return_value_file_name)
            return_value = return_value_file.read()
            return_value_file.close()
            args_file = open(os.path.join(directory, subdir, "args"))
            command_args = args_file.readlines()[0]
            command_args = command_args.replace(abs_benchmarks_root + "/", "")
            if return_value == "0":
                tool_results_file_name = os.path.join(directory, subdir, args.tool + "_results")
                if os.path.exists(tool_results_file_name):
                    report_file.write("SUCCESS (" + open(tool_results_file_name).read() + " cycles) " + command_args.replace("\\", ""))
                else:
                    report_file.write("SUCCESS: " + command_args.replace("\\", ""))
            else:
                if return_value == "124":
                    report_file.write("FAILURE(Timeout): " + command_args.replace("\\", ""))
                else:
                    report_file.write("FAILURE: " + command_args.replace("\\", ""))
            report_file.write("\n")
        elif os.path.exists(os.path.join(directory, subdir, "report")):
            local_report_file = open(os.path.join(directory, subdir, "report"))
            report_file.write(local_report_file.read())
            local_report_file.close()
    report_file.close()
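    # For bambu, gather every <tool>_results_0.xml and run spider to build the LaTeX results table.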
    if args.tool == "bambu":
        local_args = ""
        named_list_name = os.path.join(abs_path, "named_list")
        lines = open(named_list_name).readlines()
        for line in lines:
            local_dir = ComputeDirectory(line)
            if os.path.exists(os.path.join(local_dir, args.tool + "_results_0.xml")):
                local_args = local_args + " " + os.path.join(local_dir, args.tool + "_results_0.xml")
        if len(local_args) > 0:
            experimental_setup_file_name = os.path.join(abs_path, "experimental_setup.xml")
            temp_list = open(experimental_setup_file_name, "w")
            bambu_version_file_name = os.path.join(abs_path, "bambu_version")
            bambu_version_file = open(bambu_version_file_name, "w")
            bambu_version_command = [tool_exe]
            bambu_version_command.extend(shlex.split("--version"))
            subprocess.call(bambu_version_command, stdout=bambu_version_file)
            bambu_version_file.close()
            bambu_version_file = open(bambu_version_file_name, "r")
            bambu_version = bambu_version_file.readlines()[-2].rstrip()
            bambu_version_file.close()
            if args.commonargs != None:
                bambu_arguments = ' '.join(' '.join(map(str, l)) for l in args.commonargs)
            else:
                bambu_arguments = ""
            temp_list.write("<?xml version=\"1.0\"?>\n")
            temp_list.write("<experimental_setup>\n")
            temp_list.write(" <bambu_version value=\"" + bambu_version + "\"/>\n")
            temp_list.write(" <timestamp value=\"" + str(datetime.datetime.now()) + "\"/>\n")
            temp_list.write(" <script value=\"" + args.script + "\"/>\n")
            temp_list.write(" <bambu_arguments value=\"" + bambu_arguments + "\"/>\n")
            temp_list.write(" <benchmarks>\n")
            reordered_list_name = os.path.join(abs_path, "reordered_list")
            reordered_list = open(reordered_list_name, "r")
            for line in reordered_list.readlines():
                temp_list.write(" <benchmark value=\"" + line.rstrip().replace("\\\\\\-", "-").replace("\\\\\\=", "=").replace("\\\\\\_", "_").replace("\\\\\\/", "/").replace("\\\\\\\"", "&quot;").replace("\\\\\\.", ".") + "\"/>\n")
            temp_list.write(" </benchmarks>\n")
            temp_list.write("</experimental_setup>\n")
            temp_list.close()
            local_args = local_args + " " + experimental_setup_file_name
            if os.path.exists(args.spider_style):
                local_args = local_args + " " + args.spider_style + " " + table
            else:
                local_args = local_args + " " + os.path.join(os.path.dirname(spider), args.spider_style) + " " + table
            logging.info(" Executing " + spider)
            local_command = [spider]
            local_command.extend(shlex.split(local_args))
            return_value = subprocess.call(local_command)
    logging.info("Collected results of " + directory)
# Write the JUnit <testsuite>/<testcase> entries for a directory tree into ju_file.
def CreateJunitBody(directory, ju_file):
    # Skip if this directory contains the output of a single test or is empty
    if os.path.exists(os.path.join(directory, args.tool + "_return_value")) or os.listdir(directory) == []:
        return
    subdirs = [s for s in sorted(os.listdir(directory)) if os.path.isdir(os.path.join(directory, s)) and s != "panda-temp" and s != "HLS_output"]
    print_testsuite = False
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_return_value")):
            print_testsuite = True
    failed_counter_file_name = os.path.join(abs_path, "failed_counter")
    failed_counter = "0"
    if os.path.exists(failed_counter_file_name):
        failed_counter_file = open(failed_counter_file_name)
        failed_counter = failed_counter_file.read()
    if print_testsuite and len(subdirs) > 0:
        ju_file.write(" <testsuite disabled=\"0\" errors=\"0\" failures=\"" + failed_counter + "\" name=\"" + directory + "\" tests=\"" + str(len(subdirs)) + "\" timestamp=\"" + str(datetime.datetime.now()) + "\">\n")
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_return_value")):
            return_value_file_name = os.path.join(directory, subdir, args.tool + "_return_value")
            return_value_file = open(return_value_file_name)
            return_value = return_value_file.read()
            return_value_file.close()
            args_file = open(os.path.join(directory, subdir, "args"))
            command_args = args_file.readlines()[0]
            command_args = command_args.replace(abs_benchmarks_root + "/", "")
            if return_value == "0":
                ju_file.write(" <testcase classname=\"PandA-bambu-tests\" name=\"" + command_args.replace("\\", "") + "\">\n")
            else:
                if return_value == "124":
                    ju_file.write(" <testcase classname=\"PandA-bambu-tests\" name=\"" + command_args.replace("\\", "") + "\">\n")
                    ju_file.write(" <failure type=\"FAILURE(Timeout)\"></failure>\n")
                    ju_file.write(" <system-out>\n")
                    ju_file.write("<![CDATA[\n")
                else:
                    ju_file.write(" <testcase classname=\"PandA-bambu-tests\" name=\"" + command_args.replace("\\", "") + "\">\n")
                    ju_file.write(" <failure type=\"FAILURE\"></failure>\n")
                    ju_file.write(" <system-out>\n")
                    ju_file.write("<![CDATA[\n")
                with open(os.path.join(directory, args.tool + "_failed_output")) as f:
                    for line in deque(f, maxlen=15):
                        ju_file.write(line)
                ju_file.write("]]>\n")
                ju_file.write(" </system-out>\n")
            ju_file.write(" </testcase>\n")
        else:
            CreateJunitBody(os.path.join(directory, subdir), ju_file)
    if print_testsuite and len(subdirs) > 0:
        ju_file.write(" </testsuite>\n")
# Write the PerfPublisher <test> entries, with metrics read from <tool>_results_0.xml, into pp_file.
def CreatePerfPublisherBody(directory, pp_file):
    # Skip if this directory contains the output of a single test or is empty
    if os.path.exists(os.path.join(directory, args.tool + "_return_value")) or os.listdir(directory) == []:
        return
    subdirs = [s for s in sorted(os.listdir(directory)) if os.path.isdir(os.path.join(directory, s)) and s != "panda-temp" and s != "HLS_output"]
    print_testsuite = False
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_return_value")):
            print_testsuite = True
    for subdir in subdirs:
        if os.path.exists(os.path.join(directory, subdir, args.tool + "_return_value")):
            pp_file.write(" <test name=\"" + str(directory) + "/" + str(subdir) + "\" executed=\"yes\">\n")
            pp_file.write(" <result>\n")
            return_value_file_name = os.path.join(directory, subdir, args.tool + "_return_value")
            return_value_file = open(return_value_file_name)
            return_value = return_value_file.read()
            return_value_file.close()
            args_file = open(os.path.join(directory, subdir, "args"))
            command_args = args_file.readlines()[0]
            command_args = command_args.replace(abs_benchmarks_root + "/", "")
            if return_value == "0":
                pp_file.write(" <success passed=\"yes\" state=\"100\" hasTimedOut=\"false\"/>\n")
                cycles_tag = ""
                areatime_tag = ""
                slice_tag = ""
                sliceluts_tag = ""
                registers_tag = ""
                dsps_tag = ""
                brams_tag = ""
                period_tag = ""
                slack_tag = ""
                frequency_tag = ""
                HLS_execution_time_tag = ""
                if os.path.exists(os.path.join(directory, subdir, args.tool + "_results_0.xml")):
                    xml_document = xml.dom.minidom.parse(os.path.join(directory, subdir, args.tool + "_results_0.xml"))
                    if len(xml_document.getElementsByTagName("CYCLES")) > 0:
                        cycles_tag = str(xml_document.getElementsByTagName("CYCLES")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("AREAxTIME")) > 0:
                        areatime_tag = str(xml_document.getElementsByTagName("AREAxTIME")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("SLICE")) > 0:
                        slice_tag = str(xml_document.getElementsByTagName("SLICE")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("SLICE_LUTS")) > 0:
                        sliceluts_tag = str(xml_document.getElementsByTagName("SLICE_LUTS")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("REGISTERS")) > 0:
                        registers_tag = str(xml_document.getElementsByTagName("REGISTERS")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("DSPS")) > 0:
                        dsps_tag = str(xml_document.getElementsByTagName("DSPS")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("BRAMS")) > 0:
                        brams_tag = str(xml_document.getElementsByTagName("BRAMS")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("PERIOD")) > 0:
                        period_tag = str(xml_document.getElementsByTagName("PERIOD")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("CLOCK_SLACK")) > 0:
                        slack_tag = str(xml_document.getElementsByTagName("CLOCK_SLACK")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("FREQUENCY")) > 0:
                        frequency_tag = str(xml_document.getElementsByTagName("FREQUENCY")[0].attributes["value"].value)
                    if len(xml_document.getElementsByTagName("HLS_execution_time")) > 0:
                        HLS_execution_time_tag = str(xml_document.getElementsByTagName("HLS_execution_time")[0].attributes["value"].value)
                if cycles_tag != "":
                    pp_file.write(" <performance unit=\"cycles\" mesure=\"" + cycles_tag + "\" isRelevant=\"true\"/>\n")
                if HLS_execution_time_tag != "":
                    pp_file.write(" <executiontime unit=\"s\" mesure=\"" + HLS_execution_time_tag + "\" isRelevant=\"true\"/>\n")
                if areatime_tag != "" or slice_tag != "" or sliceluts_tag != "" or registers_tag != "" or dsps_tag != "" or brams_tag != "" or period_tag != "" or slack_tag != "" or frequency_tag != "":
                    pp_file.write(" <metrics>\n")
                    if areatime_tag != "":
                        pp_file.write(" <areatime unit=\"lutxns\" mesure=\"" + areatime_tag + "\" isRelevant=\"true\"/>\n")
                    if slice_tag != "":
                        pp_file.write(" <slices unit=\"ns\" mesure=\"" + slice_tag + "\" isRelevant=\"true\"/>\n")
                    if sliceluts_tag != "":
                        pp_file.write(" <sliceluts unit=\"slice\" mesure=\"" + sliceluts_tag + "\" isRelevant=\"true\"/>\n")
                    if registers_tag != "":
                        pp_file.write(" <registers unit=\"registers\" mesure=\"" + registers_tag + "\" isRelevant=\"true\"/>\n")
                    if dsps_tag != "":
                        pp_file.write(" <dsps unit=\"dsp\" mesure=\"" + dsps_tag + "\" isRelevant=\"true\"/>\n")
                    if brams_tag != "":
                        pp_file.write(" <brams unit=\"bram\" mesure=\"" + brams_tag + "\" isRelevant=\"true\"/>\n")
                    if period_tag != "":
                        pp_file.write(" <period unit=\"ns\" mesure=\"" + period_tag + "\" isRelevant=\"true\"/>\n")
                    if slack_tag != "":
                        pp_file.write(" <slack unit=\"ns\" mesure=\"" + slack_tag + "\" isRelevant=\"true\"/>\n")
                    if frequency_tag != "":
                        pp_file.write(" <frequency unit=\"MHz\" mesure=\"" + frequency_tag + "\" isRelevant=\"true\"/>\n")
                    pp_file.write(" </metrics>\n")
            else:
                if return_value == "124":
                    pp_file.write(" <success passed=\"no\" state=\"0\" hasTimedOut=\"true\"/>\n")
                else:
                    pp_file.write(" <success passed=\"no\" state=\"0\" hasTimedOut=\"false\"/>\n")
            pp_file.write(" </result>\n")
            pp_file.write(" </test>\n")
        else:
            CreatePerfPublisherBody(os.path.join(directory, subdir), pp_file)
# Command line interface of the script.
parser = argparse.ArgumentParser(description="Performs panda tests", fromfile_prefix_chars='@')
parser.add_argument("files", help="The files to be tested: they can be configuration files, directories containing benchmarks or source code files.", nargs='*', action="append")
parser.add_argument('-l', "--benchmarks_list", help="The file containing the list of tests to be performed", nargs='*', action="append")
parser.add_argument('-b', "--benchmarks_root", help="The directory containing benchmarks")
parser.add_argument('-o', "--output", help="The directory where output files will be put (default=\"output\")", default="output")
parser.add_argument('-j', help="The number of jobs which execute the benchmarks (default=\"1\")", default=1, type=positive_integer, action=StoreOrUpdateMin)
parser.add_argument("--bambu", help="The bambu executable (default=/opt/panda/bin/bambu)", default="/opt/panda/bin/bambu")
parser.add_argument("--spider", help="The spider executable (default=/opt/panda/bin/spider)", default="/opt/panda/bin/spider")
parser.add_argument("--spider-style", help="The spider table style relative to the spider executable (default=../lib/latex_format_bambu_results.xml)", default="../lib/latex_format_bambu_results.xml")
parser.add_argument("--zebu", help="The zebu executable (default=/opt/panda/bin/zebu)", default="/opt/panda/bin/zebu")
parser.add_argument('-t', "--timeout", help="Timeout for tool execution (default=60m)", default="60m")
parser.add_argument('-a', "--args", help="A set of arguments to be passed to the tool", nargs='*', action='append')
parser.add_argument('-c', "--commonargs", help="A set of arguments to be passed to the tool", nargs='*', action='append')
parser.add_argument("--table", help="Print the results in tex format", default="results.tex")
parser.add_argument("--tool", help="The tool to be tested", default="bambu")
parser.add_argument("--ulimit", help="The ulimit options", default="-f 2097152 -v 8388608 -s 16384")
parser.add_argument("--stop", help="Stop the execution on first error (default=false)", default=False, action="store_true")
parser.add_argument("--returnfail", help="Return FAILURE in case at least one test fails (default=false)", default=False, action="store_true")
parser.add_argument("--mail", help="Send a mail with the result")
parser.add_argument("--name", help="Set the name of this regression (default=Bambu regression)", nargs='*', action='append')
parser.add_argument("--no-clean", help="Do not clean produced files", default=False, action="store_true")
parser.add_argument("--restart", help="Restart last execution (default=false)", default=False, action="store_true")
parser.add_argument("--script", help="Set the bash script in the generated tex", default="")
parser.add_argument("--junitdir", help="Set the JUnit directory", default="")
parser.add_argument("--perfpublisherdir", help="Set the PerfPublisher directory", default="")

args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
# The absolute path of the current script
abs_script = os.path.abspath(sys.argv[0])

# Rebuild argv so that configuration files are passed to argparse with the @ prefix
modified_argv = []
modified_argv.append(sys.argv[0])
abs_configuration_dir = ""
for arg in sys.argv[1:]:
    if arg in args.files[0]:
        # Source files and existing benchmark directories are kept as they are
        if (arg[-2:] == ".c") or (arg[-2:] == ".C") or (arg[-4:] == ".CPP") or (arg[-4:] == ".cpp") or (arg[-4:] == ".cxx") or (arg[-3:] == ".cc") or (arg[-4:] == ".c++"):
            modified_argv.append(arg)
        elif os.path.exists(arg) and os.path.isdir(arg):
            modified_argv.append(arg)
        elif args.benchmarks_root != None and os.path.exists(os.path.join(os.path.abspath(args.benchmarks_root), arg)) and os.path.isdir(os.path.join(os.path.abspath(args.benchmarks_root), arg)):
            modified_argv.append(arg)
        elif os.path.exists(os.path.join(os.path.dirname(abs_script), arg)) and os.path.isdir(os.path.join(os.path.dirname(abs_script), arg)):
            modified_argv.append(arg)
        else:
            modified_argv.append("@" + arg)
    else:
        modified_argv.append(arg)
args = parser.parse_args(modified_argv)
n_jobs = args.j
# The absolute path of the current script
abs_script = os.path.abspath(sys.argv[0])

# The table to be produced
table = os.path.abspath(args.table)

# Check that the output directory can be used
if os.path.exists(args.output) and not args.restart:
    logging.error("Output directory " + args.output + " already exists. Please remove it or specify a different one with -o")
    sys.exit(1)

# Create the JUnit and PerfPublisher directories if needed
if args.junitdir != "" and not os.path.exists(args.junitdir):
    os.mkdir(args.junitdir)
if args.perfpublisherdir != "" and not os.path.exists(args.perfpublisherdir):
    os.mkdir(args.perfpublisherdir)

# Compute a fresh name for the JUnit report
if args.junitdir != "":
    junit_index = 0
    junit_file_name = os.path.abspath(os.path.join(args.junitdir, "Junit_report" + str(junit_index) + ".xml"))
    while os.path.isfile(junit_file_name):
        junit_index = junit_index + 1
        junit_file_name = os.path.abspath(os.path.join(args.junitdir, "Junit_report" + str(junit_index) + ".xml"))

# Compute a fresh name for the PerfPublisher report
perfpublisher_name = ""
perfpublisher_file_name = ""
if args.perfpublisherdir != "":
    perfpublisher_index = 0
    perfpublisher_name = "PerfPublisher_report" + str(perfpublisher_index)
    perfpublisher_file_name = os.path.abspath(os.path.join(args.perfpublisherdir, perfpublisher_name + ".xml"))
    while os.path.isfile(perfpublisher_file_name):
        perfpublisher_index = perfpublisher_index + 1
        perfpublisher_name = "PerfPublisher_report" + str(perfpublisher_index)
        perfpublisher_file_name = os.path.abspath(os.path.join(args.perfpublisherdir, perfpublisher_name + ".xml"))
# The absolute path of the output directory
abs_path = os.path.abspath(args.output)
if not os.path.exists(abs_path):
    os.makedirs(abs_path)

# If a previous run already passed all tests, exit immediately
failed_counter_file_name = os.path.join(abs_path, "failed_counter")
if os.path.exists(failed_counter_file_name):
    failed_counter_file = open(failed_counter_file_name)
    failed_counter = failed_counter_file.read()
    if failed_counter == "0" and args.junitdir == "" and args.perfpublisherdir == "":
        logging.info("Already pass")
        sys.exit(0)
# Locate the executable of the tool under test
if args.tool == "bambu":
    if os.path.isfile(args.bambu) and os.access(args.bambu, os.X_OK):
        tool_exe = args.bambu
    else:
        # Check if bambu is in the path
        tool_exe = ""
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, "bambu")
            if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
                tool_exe = exe_file
        if tool_exe == "":
            if args.bambu != "opt/panda/bin/bambu":
                if not os.path.isfile(args.bambu):
                    logging.error(args.bambu + " does not exist")
                else:
                    logging.error(args.bambu + " is not an executable")
            else:
                logging.error("bambu not found")
            sys.exit(1)
    logging.info("Bambu found: " + tool_exe)
elif args.tool == "zebu":
    if os.path.isfile(args.zebu) and os.access(args.zebu, os.X_OK):
        tool_exe = args.zebu
    else:
        # Check if zebu is in the path
        tool_exe = ""
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, "zebu")
            if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
                tool_exe = exe_file
        if tool_exe == "":
            if args.zebu != "opt/panda/bin/zebu":
                if not os.path.isfile(args.zebu):
                    logging.error(args.zebu + " does not exist")
                else:
                    logging.error(args.zebu + " is not an executable")
            else:
                logging.error("zebu not found")
            sys.exit(1)
    logging.info("Zebu found: " + tool_exe)
else:
    tool_exe = args.tool
    if distutils.spawn.find_executable(tool_exe) == None:
        logging.error(tool_exe + " not found")
        sys.exit(1)
if args.benchmarks_root is None:
    abs_benchmarks_root = abs_configuration_dir
else:
    if os.path.isabs(args.benchmarks_root):
        abs_benchmarks_root = os.path.abspath(args.benchmarks_root)
    else:
        if os.path.exists(os.path.join(os.path.abspath(".."), args.benchmarks_root)):
            abs_benchmarks_root = os.path.join(os.path.abspath(".."), args.benchmarks_root)
        else:
            if os.path.exists(os.path.join(os.path.abspath(os.path.join(os.path.dirname(abs_script), "../../..")), args.benchmarks_root)):
                abs_benchmarks_root = os.path.join(os.path.abspath(os.path.join(os.path.dirname(abs_script), "../../..")), args.benchmarks_root)
            else:
                logging.error(args.benchmarks_root + " not found")
                sys.exit(1)
# Locate the spider executable
if os.path.isfile(args.spider) and os.access(args.spider, os.X_OK):
    spider = args.spider
else:
    # Check if spider is in the path
    spider = ""
    for path in os.environ["PATH"].split(os.pathsep):
        exe_file = os.path.join(path, "spider")
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            spider = exe_file
    if spider == "":
        if args.spider != "opt/panda/bin/spider":
            if not os.path.isfile(args.spider):
                logging.error(args.spider + " does not exist")
            else:
                logging.error(args.spider + " is not an executable")
        else:
            logging.error("spider not found")
        sys.exit(1)

if args.benchmarks_root is None:
    abs_benchmarks_root = abs_configuration_dir
else:
    if os.path.isabs(args.benchmarks_root):
        abs_benchmarks_root = os.path.abspath(args.benchmarks_root)
    else:
        if os.path.exists(os.path.join(os.path.abspath(".."), args.benchmarks_root)):
            abs_benchmarks_root = os.path.join(os.path.abspath(".."), args.benchmarks_root)
        else:
            if os.path.exists(os.path.join(os.path.abspath(os.path.join(os.path.dirname(abs_script), "../../..")), args.benchmarks_root)):
                abs_benchmarks_root = os.path.join(os.path.abspath(os.path.join(os.path.dirname(abs_script), "../../..")), args.benchmarks_root)
            else:
                logging.error(args.benchmarks_root + " not found")
                sys.exit(1)
logging.info("Spider found: " + spider)
if args.mail != None:
    # Check the presence of mutt
    mutt = None
    for path in os.environ["PATH"].split(os.pathsep):
        exe_file = os.path.join(path, "mutt")
        if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
            mutt = exe_file
    if mutt == None:
        logging.error("mutt not found")
        sys.exit(1)
# Build the absolute paths of the benchmark lists to be run
abs_lists = []
if args.benchmarks_list != None:
    for relative_list in args.benchmarks_list:
        # Look for the list in the possible locations
        if os.path.exists(os.path.abspath("../" + relative_list[0])):
            abs_lists.append(os.path.abspath("../" + relative_list[0]))
        elif os.path.exists(os.path.join(os.path.dirname(abs_script), relative_list[0])):
            abs_lists.append(os.path.join(os.path.dirname(abs_script), relative_list[0]))
        elif os.path.exists(os.path.join(abs_configuration_dir, relative_list[0])):
            abs_lists.append(os.path.join(abs_configuration_dir, relative_list[0]))
        elif os.path.exists(os.path.join(abs_benchmarks_root, relative_list[0])):
            abs_lists.append(os.path.join(abs_benchmarks_root, relative_list[0]))
        else:
            logging.error(relative_list[0] + " does not exist")
            sys.exit(1)
files_list = []
for arg in args.files[0]:
    # Source files are added directly, directories are resolved against the known roots
    if (arg[-2:] == ".c") or (arg[-2:] == ".C") or (arg[-4:] == ".CPP") or (arg[-4:] == ".cpp") or (arg[-4:] == ".cxx") or (arg[-3:] == ".cc") or (arg[-4:] == ".c++"):
        files_list.append(arg)
    elif os.path.exists(arg) and os.path.isdir(arg):
        files_list.append(arg)
    elif os.path.exists(os.path.join(os.path.dirname(abs_script), arg)) and os.path.isdir(os.path.join(os.path.dirname(abs_script), arg)):
        files_list.append(arg)
    elif os.path.exists(os.path.join(abs_benchmarks_root, arg)) and os.path.isdir(os.path.join(abs_benchmarks_root, arg)):
        files_list.append(os.path.join(abs_benchmarks_root, arg))

if args.benchmarks_list == None and len(files_list) == 0 and (args.tool == "bambu" or args.tool == "zebu"):
    logging.error("Benchmarks not found")
    sys.exit(1)

if len(files_list) > 0:
    temp_list = open(os.path.join(abs_path, "temp_list"), "w")
    for element in files_list:
        temp_list.write(element)
    temp_list.close()
    abs_lists.append(os.path.join(abs_path, "temp_list"))
# The benchmark list is refined in stages: reordered_list -> expanded_list -> arged_list -> named_list.
if not args.restart:
    # Rewrite each line so that the benchmark path comes first and the options follow it
    reordered_list_name = os.path.join(abs_path, "reordered_list")
    reordered_list = open(reordered_list_name, "w")

    logging.info("Preparing benchmark list")
    logging.info(" Reordering arguments")
    for abs_list in abs_lists:
        list_file = open(abs_list)
        lines = list_file.readlines()
        list_file.close()
        for line in lines:
            if line.strip() == "":
                continue
            if args.tool != "bambu" and args.tool != "zebu":
                reordered_list.write(line)
                continue
            tokens = shlex.split(line)
            parameters = list()
            follow_param = False
            for token in tokens:
                if token[0] == '-':
                    parameters.append(re.escape(token))
                    if token.find("--param") != -1:
                        follow_param = True
                    else:
                        follow_param = False
                else:
                    if follow_param == True:
                        parameters.append(re.escape(token))
                    else:
                        reordered_list.write(token + " ")
                    follow_param = False
            for parameter in parameters:
                reordered_list.write(re.escape(parameter) + " ")
            reordered_list.write("\n")
    reordered_list.close()
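    # Expand benchmark directories into the C/C++ files they contain.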
    expanded_list_name = os.path.join(abs_path, "expanded_list")
    expanded_list = open(expanded_list_name, "w")

    logging.info(" Expanding directory")
    lines = open(reordered_list_name).readlines()
    for line in lines:
        if line.strip() == "":
            continue
        tokens = shlex.split(line)
        if args.tool == "bambu" or args.tool == "zebu":
            if tokens[0][0] != '/':
                first_parameter = os.path.join(abs_benchmarks_root, tokens[0])
            else:
                first_parameter = tokens[0]
        else:
            first_parameter = tokens[0].replace("BENCHMARKS\_ROOT", abs_benchmarks_root)
        other_parameters = tokens[1:len(tokens)]
        if not os.path.exists(first_parameter) and (args.tool == "bambu" or args.tool == "zebu"):
            logging.error(first_parameter + " does not exist")
            sys.exit(1)
        # A directory is expanded into one line per contained source file
        if os.path.isdir(first_parameter):
            logging.info(" " + tokens[0])
            c_files = SearchCFiles(first_parameter)
            c_files = sorted(c_files)
            for c_file in c_files:
                expanded_list.write(c_file)
                for other_parameter in other_parameters:
                    expanded_list.write(" " + other_parameter.replace("BENCHMARKS\_ROOT", abs_benchmarks_root))
                expanded_list.write("\n")
        else:
            expanded_list.write(first_parameter)
            for other_parameter in other_parameters:
                if ((other_parameter[-2:] == ".c") or (other_parameter[-2:] == ".C") or (other_parameter[-4:] == ".CPP") or (other_parameter[-4:] == ".cpp") or (other_parameter[-4:] == ".cxx") or (other_parameter[-3:] == ".cc") or (other_parameter[-4:] == ".c++") or other_parameter[-4:] == ".xml") and other_parameter[0] != '\\':
                    if other_parameter[0] == '/':
                        expanded_list.write(" " + other_parameter)
                    else:
                        expanded_list.write(" " + os.path.join(abs_benchmarks_root, other_parameter))
                else:
                    expanded_list.write(" " + other_parameter.replace("BENCHMARKS\_ROOT", abs_benchmarks_root).replace("BENCHMARKS_ROOT", abs_benchmarks_root))
            expanded_list.write("\n")
    expanded_list.close()
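    # Append each -a argument set and the -c common arguments to every benchmark line.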
    logging.info(" Considering all tool arguments")
    arg_lists = args.args
    if not arg_lists:
        arg_lists = [[]]
    arged_list_name = os.path.join(abs_path, "arged_list")
    arged_list = open(arged_list_name, "w")
    lines = open(expanded_list_name).readlines()
    for arg_list in arg_lists:
        for line in lines:
            arged_list.write(line.rstrip())
            if len(arg_list) > 0:
                arg = arg_list[0]
                if arg[0] == "\"":
                    arg = arg[1:-1]
                arged_list.write(" " + arg)
            if args.commonargs != None and len(args.commonargs) > 0:
                for commonarg in args.commonargs:
                    arged_list.write(" " + commonarg[0].replace("#", " "))
            arged_list.write("\n")
    arged_list.close()
    # Add the benchmark name to each line and check for duplicates
    logging.info(" Adding benchmark name")
    named_list_name = os.path.join(abs_path, "named_list")
    named_list = open(named_list_name, "w")
    lines = open(arged_list_name).readlines()
    full_names = set()
    for line in lines:
        named_list.write(line.rstrip())
        # Retrieve configuration name and benchmark name
        configuration_name = ""
        benchmark_name = ""
        tokens = shlex.split(line)
        for token in tokens:
            if token.find("--configuration-name") != -1:
                configuration_name = token[len("--configuration-name="):]
            if token.find("--benchmark-name") != -1:
                benchmark_name = token[len("--benchmark-name="):]
        if benchmark_name == "":
            if args.tool != "bambu" and args.tool != "zebu":
                logging.error("Missing benchmark name")
                sys.exit(1)
            benchmark_name = os.path.basename(line.split()[0])[:-2]
            named_list.write(" --benchmark-name=" + benchmark_name)
        full_name = configuration_name + ":" + benchmark_name
        logging.info(" " + full_name)
        if full_name in full_names:
            logging.error("Duplicated configuration name - benchmark name: " + full_name)
            sys.exit(1)
        full_names.add(full_name)
        named_list.write("\n")
    named_list.close()
    # Generate one output directory per benchmark
    logging.info(" Generating output directories")
    lines = open(named_list_name).readlines()
    for line in lines:
        new_dir = ComputeDirectory(line)
        logging.info(" Creating directory " + new_dir)
        os.makedirs(new_dir)
else:
    logging.info(" Skipping generation of lists and directories")
    named_list_name = os.path.join(abs_path, "named_list")
    if not os.path.exists(named_list_name):
        logging.error("List of previous run not found")
        sys.exit(1)
# Launch one worker thread per job; each consumes entries from the named list.
logging.info(" Launching tool")
lock = threading.RLock()
lock_creation_destruction = threading.RLock()
passed_benchmark = 0
total_benchmark = 0
line_index = 0
failure = False
children = [None] * n_jobs
threads = []
for thread_index in range(n_jobs):
    threads.insert(thread_index, threading.Thread(target=execute_tests, args=(named_list_name, thread_index)))
    threads[thread_index].daemon = True
    threads[thread_index].start()

try:
    for thread_index in range(n_jobs):
        while threads[thread_index].isAlive():
            threads[thread_index].join(100)
except KeyboardInterrupt:
    logging.error("SIGINT received")
    failure = True
    for local_thread_index in range(n_jobs):
        if children[local_thread_index] != None:
            if children[local_thread_index].poll() == None:
                try:
                    kill_proc_tree(children[local_thread_index].pid)
                except OSError:
                    pass
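# If requested, write the JUnit and PerfPublisher reports.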
if args.junitdir != "":
    junit_file = open(junit_file_name, "w")
    junit_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    junit_file.write("<testsuites disabled=\"0\" errors=\"0\" failures=\"" + str(total_benchmark - passed_benchmark) + "\" name=\"" + abs_path + "\" tests=\"" + str(total_benchmark) + "\">\n")
    CreateJunitBody(abs_path, junit_file)
    junit_file.write("</testsuites>\n")

if args.perfpublisherdir != "":
    perfpublisher_file = open(perfpublisher_file_name, "w")
    perfpublisher_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
    perfpublisher_file.write("<report name=\"" + perfpublisher_name + "\" categ=\"PandA-Report\">\n")
    CreatePerfPublisherBody(abs_path, perfpublisher_file)
    perfpublisher_file.write("</report>\n")
# Collect the results of the single runs and generate the final report
CollectResults(abs_path)

if args.tool == "bambu" or args.tool == "zebu":
    report_file_name = os.path.join(abs_path, "report")
    report_file = open(report_file_name)
    lines = report_file.readlines()
    report_file.close()
    report_file = open(report_file_name, "w")
    command = [tool_exe, "--version"]
    subprocess.call(command, stderr=report_file, stdout=report_file)
    report_file.write("SYSTEM INFORMATION:\n")
    command = ["lsb_release", "-a"]
    subprocess.call(command, stderr=report_file, stdout=report_file)
    report_file.write("\n")
    report_file.write("CURRENT TIME:\n")
    report_file.write(str(datetime.datetime.now()) + "\n\n")
    report_file.write("PASSED TESTS:\n")
    report_file.write(str(passed_benchmark) + "/" + str(total_benchmark) + "\n\n")

    failed_counter_file_name = os.path.join(abs_path, "failed_counter")
    failed_counter_file = open(failed_counter_file_name, "w")
    failed_counter_file.write(str(total_benchmark - passed_benchmark))
    failed_counter_file.close()

    for line in lines:
        report_file.write(line)
    report_file.close()
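# Optionally mail the report (requires mutt).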
if args.mail != None:
    outcome = str(passed_benchmark) + "/" + str(total_benchmark)
    full_name = ""
    for name in args.name:
        full_name = full_name + name[0]
    local_command = "cat " + report_file_name + " | mutt -s \"" + full_name + ": " + outcome + "\" " + args.mail
    subprocess.call(local_command, shell=True)