[dexter] Don't generate results files by default
Dexter saves various files to a new results directory each time it is run (including when it is run by lit tests), and there is currently no way to opt out. This patch makes the behaviour opt-in by removing the default `--results-directory` location: results are now saved only if `--results-directory` is specified.

Reviewed By: jmorse

Differential Revision: https://reviews.llvm.org/D119545
This commit is contained in:
parent 52fbb786a6
commit 2bd62e0b04
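The core of the change is switching the option's default from a generated timestamped path to None and gating every write on it. Below is a minimal standalone sketch of that opt-in pattern; the option name and default mirror the diff, while the parser and surrounding script are illustrative only:

    import argparse
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--results-directory',
        type=str,
        metavar='<directory>',
        default=None,
        help='directory to save results (default: none)')

    args = parser.parse_args(['--results-directory', '/tmp/dexter-results'])

    # Writes become opt-in: nothing is saved unless the flag was given.
    if args.results_directory:
        os.makedirs(args.results_directory, exist_ok=True)
        print('results will be saved to', os.path.abspath(args.results_directory))
    else:
        print('no results directory specified; nothing will be written')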
@@ -46,10 +46,8 @@ class TestToolBase(ToolBase):
             '--results-directory',
             type=str,
             metavar='<directory>',
-            default=os.path.abspath(
-                os.path.join(get_root_directory(), '..', 'results',
-                             datetime.now().strftime('%Y-%m-%d-%H%M-%S'))),
-            help='directory to save results')
+            default=None,
+            help='directory to save results (default: none)')
 
     def handle_options(self, defaults):
         options = self.context.options
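For contrast, the removed default built a fresh timestamped directory name on every invocation, which is why each run (including lit-driven ones) produced a new results tree. A sketch of that computation, with `get_root_directory()` stubbed here since it is a dexter-internal helper:

    import os
    from datetime import datetime

    def get_root_directory():
        # Stand-in for dexter's helper; assume it returns the tool's source root.
        return os.path.dirname(os.path.abspath(__file__))

    # The old default: <root>/../results/<YYYY-MM-DD-HHMM-SS>, unique per run.
    old_default = os.path.abspath(
        os.path.join(get_root_directory(), '..', 'results',
                     datetime.now().strftime('%Y-%m-%d-%H%M-%S')))
    print(old_default)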
@@ -86,14 +84,15 @@ class TestToolBase(ToolBase):
                 '<d>could not find test path</> <r>"{}"</>'.format(
                     options.test_path))
 
-        options.results_directory = os.path.abspath(options.results_directory)
-        if not os.path.isdir(options.results_directory):
-            try:
-                os.makedirs(options.results_directory, exist_ok=True)
-            except OSError as e:
-                raise Error(
-                    '<d>could not create directory</> <r>"{}"</> <y>({})</>'.
-                    format(options.results_directory, e.strerror))
+        if options.results_directory:
+            options.results_directory = os.path.abspath(options.results_directory)
+            if not os.path.isdir(options.results_directory):
+                try:
+                    os.makedirs(options.results_directory, exist_ok=True)
+                except OSError as e:
+                    raise Error(
+                        '<d>could not create directory</> <r>"{}"</> <y>({})</>'.
+                        format(options.results_directory, e.strerror))
 
     def go(self) -> ReturnCode:  # noqa
         options = self.context.options
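The guarded directory setup keeps the old error handling, just nested under the new opt-in check. A minimal sketch of the same create-if-missing logic outside dexter, with a plain `RuntimeError` standing in for dexter's `Error` type:

    import os

    def prepare_results_directory(path):
        """Normalize and create the results directory, mirroring the guarded flow."""
        if not path:
            return None  # opt-out: caller skips all results writing
        path = os.path.abspath(path)
        if not os.path.isdir(path):
            try:
                os.makedirs(path, exist_ok=True)
            except OSError as e:
                raise RuntimeError(
                    'could not create directory "{}" ({})'.format(path, e.strerror))
        return path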
@@ -192,9 +192,12 @@ class Tool(TestToolBase):
             steps_changed = steps_str != prev_steps_str
             prev_steps_str = steps_str
 
-            # If this is the first pass, or something has changed, write a text
-            # file containing verbose information on the current status.
-            if current_limit == 0 or score_difference or steps_changed:
+            # If a results directory has been specified and this is the first
+            # pass or something has changed, write a text file containing
+            # verbose information on the current status.
+            if options.results_directory and (current_limit == 0 or
+                                              score_difference or
+                                              steps_changed):
                 file_name = '-'.join(
                     str(s) for s in [
                         'status', test_name, '{{:0>{}}}'.format(
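The rewritten condition simply prepends the opt-in check to the existing triggers (first pass, score change, or step change). A compressed sketch of that write-only-when-asked-and-changed pattern, using hypothetical stand-in variables and a simplified file name (the real code zero-pads the pass number):

    import os

    def maybe_write_status(results_directory, current_limit, score_difference,
                           steps_changed, test_name, status_text):
        # Skip entirely unless the user opted in to results files.
        if results_directory and (current_limit == 0 or
                                  score_difference or
                                  steps_changed):
            file_name = 'status-{}-{}.txt'.format(test_name, current_limit)
            with open(os.path.join(results_directory, file_name), 'w') as fp:
                fp.write(status_text)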
@@ -231,31 +234,33 @@ class Tool(TestToolBase):
                 current_bisect_pass_summary[pass_info[1]].append(
                     score_difference)
 
-            per_pass_score_path = os.path.join(
-                options.results_directory,
-                '{}-per_pass_score.csv'.format(test_name))
+            if options.results_directory:
+                per_pass_score_path = os.path.join(
+                    options.results_directory,
+                    '{}-per_pass_score.csv'.format(test_name))
 
-            with open(per_pass_score_path, mode='w', newline='') as fp:
-                writer = csv.writer(fp, delimiter=',')
-                writer.writerow(['Source File', 'Pass', 'Score'])
+                with open(per_pass_score_path, mode='w', newline='') as fp:
+                    writer = csv.writer(fp, delimiter=',')
+                    writer.writerow(['Source File', 'Pass', 'Score'])
 
-                for path, pass_, score in per_pass_score:
-                    writer.writerow([path, pass_, score])
-            self.context.o.blue('wrote "{}"\n'.format(per_pass_score_path))
+                    for path, pass_, score in per_pass_score:
+                        writer.writerow([path, pass_, score])
+                self.context.o.blue('wrote "{}"\n'.format(per_pass_score_path))
 
-            pass_summary_path = os.path.join(
-                options.results_directory, '{}-pass-summary.csv'.format(test_name))
+                pass_summary_path = os.path.join(
+                    options.results_directory, '{}-pass-summary.csv'.format(test_name))
 
-            self._write_pass_summary(pass_summary_path,
-                                     current_bisect_pass_summary)
+                self._write_pass_summary(pass_summary_path,
+                                         current_bisect_pass_summary)
 
     def _handle_results(self) -> ReturnCode:
         options = self.context.options
-        pass_summary_path = os.path.join(options.results_directory,
-                                         'overall-pass-summary.csv')
+        if options.results_directory:
+            pass_summary_path = os.path.join(options.results_directory,
+                                             'overall-pass-summary.csv')
 
-        self._write_pass_summary(pass_summary_path,
-                                 self._all_bisect_pass_summary)
+            self._write_pass_summary(pass_summary_path,
+                                     self._all_bisect_pass_summary)
         return ReturnCode.OK
 
     def _clang_opt_bisect_build(self, opt_bisect_limits):
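Both CSV reports use the stdlib `csv` module, opening the file with `newline=''` as the csv documentation recommends for writing. A standalone sketch of the per-pass score report, with the sample rows invented purely for illustration:

    import csv
    import os

    def write_per_pass_score(results_directory, test_name, per_pass_score):
        path = os.path.join(results_directory,
                            '{}-per_pass_score.csv'.format(test_name))
        with open(path, mode='w', newline='') as fp:
            writer = csv.writer(fp, delimiter=',')
            writer.writerow(['Source File', 'Pass', 'Score'])
            for source, pass_, score in per_pass_score:
                writer.writerow([source, pass_, score])
        return path

    # Example usage with made-up data:
    print(write_per_pass_score(
        '.', 'demo', [('main.cpp', 'SROA', 0.5), ('main.cpp', 'InstCombine', 0.4)]))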
@@ -176,6 +176,7 @@ class Tool(TestToolBase):
         """Returns the path to the test results directory for the test denoted
         by test_name.
         """
+        assert self.context.options.results_directory != None
         return os.path.join(self.context.options.results_directory,
                             self._get_results_basename(test_name))
 
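The added assert records an invariant rather than new behaviour: the path helpers are only reachable from code paths that have already checked `options.results_directory`. A toy illustration of that contract, with hypothetical class and method names:

    import os

    class ResultsPaths:
        def __init__(self, results_directory):
            self.results_directory = results_directory

        def results_path(self, test_name):
            # Callers must have opted in before asking for a results path.
            assert self.results_directory is not None
            return os.path.join(self.results_directory, test_name)

        def record(self, test_name, text):
            if self.results_directory:  # the guard the assert relies on
                with open(self.results_path(test_name) + '.txt', 'w') as fp:
                    fp.write(text)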
@@ -193,22 +194,25 @@ class Tool(TestToolBase):
 
     def _record_steps(self, test_name, steps):
         """Write out the set of steps out to the test's .txt and .json
-        results file.
+        results file if a results directory has been specified.
         """
-        output_text_path = self._get_results_text_path(test_name)
-        with open(output_text_path, 'w') as fp:
-            self.context.o.auto(str(steps), stream=Stream(fp))
+        if self.context.options.results_directory:
+            output_text_path = self._get_results_text_path(test_name)
+            with open(output_text_path, 'w') as fp:
+                self.context.o.auto(str(steps), stream=Stream(fp))
 
-        output_dextIR_path = self._get_results_pickle_path(test_name)
-        with open(output_dextIR_path, 'wb') as fp:
-            pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
+            output_dextIR_path = self._get_results_pickle_path(test_name)
+            with open(output_dextIR_path, 'wb') as fp:
+                pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
 
     def _record_score(self, test_name, heuristic):
-        """Write out the test's heuristic score to the results .txt file.
+        """Write out the test's heuristic score to the results .txt file
+        if a results directory has been specified.
         """
-        output_text_path = self._get_results_text_path(test_name)
-        with open(output_text_path, 'a') as fp:
-            self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))
+        if self.context.options.results_directory:
+            output_text_path = self._get_results_text_path(test_name)
+            with open(output_text_path, 'a') as fp:
+                self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))
 
     def _record_test_and_display(self, test_case):
         """Output test case to o stream and record test case internally for
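The `.dextIR` file is just a pickle of the steps object, now written only inside the opt-in branch. A self-contained sketch of the same dump/load round trip, with a placeholder dict standing in for dexter's steps object:

    import pickle

    steps = {'test': 'demo', 'steps': [1, 2, 3]}  # placeholder steps object

    with open('demo.dextIR', 'wb') as fp:
        pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)

    with open('demo.dextIR', 'rb') as fp:
        assert pickle.load(fp) == steps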
@@ -272,19 +276,20 @@ class Tool(TestToolBase):
         if num_tests != 0:
             print("@avg: ({:.4f})".format(score_sum/num_tests))
 
-        summary_path = os.path.join(options.results_directory, 'summary.csv')
-        with open(summary_path, mode='w', newline='') as fp:
-            writer = csv.writer(fp, delimiter=',')
-            writer.writerow(['Test Case', 'Score', 'Error'])
-
-            for test_case in self._test_cases:
-                if (test_case.score < options.fail_lt or
-                        test_case.error is not None):
-                    return_code = ReturnCode.FAIL
-
-                writer.writerow([
-                    test_case.name, '{:.4f}'.format(test_case.score),
-                    test_case.error
-                ])
+        has_failed = lambda test: test.score < options.fail_lt or test.error
+        if any(map(has_failed, self._test_cases)):
+            return_code = ReturnCode.FAIL
+
+        if options.results_directory:
+            summary_path = os.path.join(options.results_directory, 'summary.csv')
+            with open(summary_path, mode='w', newline='') as fp:
+                writer = csv.writer(fp, delimiter=',')
+                writer.writerow(['Test Case', 'Score', 'Error'])
+
+                for test_case in self._test_cases:
+                    writer.writerow([
+                        test_case.name, '{:.4f}'.format(test_case.score),
+                        test_case.error
+                    ])
 
         return return_code
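Splitting failure detection out of the summary loop means the return code no longer depends on whether the summary file is written. A minimal model of the new `any(map(has_failed, ...))` check, with a stand-in test-case type:

    from collections import namedtuple

    TestCase = namedtuple('TestCase', ['name', 'score', 'error'])

    def compute_return_code(test_cases, fail_lt):
        # A test fails if it scored below the threshold or raised an error.
        has_failed = lambda test: test.score < fail_lt or test.error
        return 'FAIL' if any(map(has_failed, test_cases)) else 'OK'

    print(compute_return_code(
        [TestCase('a', 1.0, None), TestCase('b', 0.2, None)], fail_lt=0.5))  # FAIL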
@@ -0,0 +1,27 @@
+// Purpose:
+//     Check the `clang-opt-bisect` tool runs with --results-directory.
+//
+// RUN: true
+// RUN: %dexter_base clang-opt-bisect \
+// RUN:     --debugger %dexter_regression_test_debugger \
+// RUN:     --builder %dexter_regression_test_builder \
+// RUN:     --cflags "%dexter_regression_test_cflags" \
+// RUN:     --ldflags "%dexter_regression_test_ldflags" \
+// RUN:     --results-directory=%t \
+// RUN:     -- %s \
+// RUN: | FileCheck %s
+//// Clean up those results files.
+// RUN: rm %t/clang-opt-bisect-results.cpp-pass-summary.csv
+// RUN: rm %t/clang-opt-bisect-results.cpp-per_pass_score.csv
+// RUN: rm %t/overall-pass-summary.csv
+// RUN: rm %t/*.dextIR
+// RUN: rm %t/*.txt
+// RUN: rmdir %t
+// CHECK: running pass 0
+// CHECK: wrote{{.*}}per_pass_score
+// CHECK: wrote{{.*}}pass-summary
+// CHECK: wrote{{.*}}overall-pass-summary
+
+int main() {
+    return 0;
+}
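A note on the RUN lines in this new test: lit substitutes %t with a per-test temporary path, so passing `--results-directory=%t` both exercises the new opt-in flag and keeps the generated files inside lit's temp area. The trailing `rm` and `rmdir` lines then double as a check that exactly the expected files were produced, since `rm` fails on a missing file and `rmdir` fails on a non-empty directory.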
@@ -10,9 +10,6 @@
 // RUN: -- %s \
 // RUN: | FileCheck %s
 // CHECK: running pass 0
-// CHECK: wrote{{.*}}per_pass_score
-// CHECK: wrote{{.*}}pass-summary
-// CHECK: wrote{{.*}}overall-pass-summary
 
 int main() {
     return 0;