Add --output-file option for clean results output

- Add --output-file argument to test_logo_detection.py that appends
  only the results summary (no progress indicators) to the specified
  file
- Add write_results_to_file() with a detailed header showing the test
  type and method parameters
- Update run_comparison_tests.sh to use --output-file instead of
  tee/redirection, keeping console output separate from file output
Author: Rick McEwen
Date:   2025-12-31 17:42:52 -05:00
Parent: 41bc0c701f
Commit: 41c75356d9

2 changed files with 112 additions and 14 deletions

test_logo_detection.py

@@ -268,6 +268,12 @@ def main():
        action="store_true",
        help="Clear embedding cache before running",
    )
    parser.add_argument(
        "--output-file",
        type=str,
        default=None,
        help="Append results summary to this file (no progress output, just results)",
    )

    args = parser.parse_args()
    logger = setup_logging(args.verbose)
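For context, this is roughly how the new flag might be passed on the command line. A minimal sketch using subprocess: only --output-file is confirmed by this diff, while the other flag names are assumptions inferred from the args attributes used in the code below.

# Hypothetical invocation of the updated script. --output-file is the
# flag added in this commit; --matching-method and --threshold are
# assumed flag names inferred from args.matching_method / args.threshold.
import subprocess

subprocess.run(
    [
        "python", "test_logo_detection.py",
        "--matching-method", "margin",    # assumed flag name
        "--threshold", "0.8",             # assumed flag name, illustrative value
        "--output-file", "results.txt",   # new flag from this commit
    ],
    check=True,
)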
@@ -579,6 +585,92 @@ def main():
    print("=" * 60)

    # Write results to file if requested
    if args.output_file:
        write_results_to_file(
            output_path=Path(args.output_file),
            args=args,
            num_logos=len(sampled_logos),
            total_refs=total_refs,
            num_test_images=len(test_images),
            true_positives=true_positives,
            false_positives=false_positives,
            false_negatives=false_negatives,
            total_expected=total_expected,
            precision=precision,
            recall=recall,
            f1=f1,
        )
        print(f"\nResults appended to: {args.output_file}")


def write_results_to_file(
    output_path: Path,
    args,
    num_logos: int,
    total_refs: int,
    num_test_images: int,
    true_positives: int,
    false_positives: int,
    false_negatives: int,
    total_expected: int,
    precision: float,
    recall: float,
    f1: float,
):
    """Write results summary to file with detailed header."""
    from datetime import datetime

    # Build method description for header
    if args.matching_method == "simple":
        method_desc = "Simple (all matches above threshold)"
    elif args.matching_method == "margin":
        method_desc = f"Margin-based (margin={args.margin})"
    else:  # multi-ref
        agg = "max" if args.use_max_similarity else "mean"
        method_desc = f"Multi-ref ({agg}, min_refs={args.min_matching_refs}, margin={args.margin})"

    lines = [
        "=" * 70,
        f"TEST: {args.matching_method.upper()} MATCHING",
        f"Method: {method_desc}",
        "=" * 70,
        f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",
        "Configuration:",
        f"  Reference logos:            {num_logos}",
        f"  Refs per logo:              {args.refs_per_logo}",
        f"  Total reference embeddings: {total_refs}",
        f"  Positive samples/logo:      {args.positive_samples}",
        f"  Negative samples/logo:      {args.negative_samples}",
        f"  Test images processed:      {num_test_images}",
        f"  CLIP threshold:             {args.threshold}",
        f"  DETR threshold:             {args.detr_threshold}",
    ]
    if args.seed is not None:
        lines.append(f"  Random seed:                {args.seed}")

    lines.extend([
        "",
        "Results:",
        f"  True Positives:  {true_positives:>6}",
        f"  False Positives: {false_positives:>6}",
        f"  False Negatives: {false_negatives:>6}",
        f"  Total Expected:  {total_expected:>6}",
        "",
        "Scores:",
        f"  Precision: {precision:.4f} ({precision*100:.1f}%)",
        f"  Recall:    {recall:.4f} ({recall*100:.1f}%)",
        f"  F1 Score:  {f1:.4f} ({f1*100:.1f}%)",
        "",
        "",
    ])

    # Append to file
    with open(output_path, "a") as f:
        f.write("\n".join(lines))


if __name__ == "__main__":
    main()
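To exercise write_results_to_file() in isolation, a minimal sketch assuming the function is importable from test_logo_detection and that a types.SimpleNamespace can stand in for the parsed args; all values below are illustrative.

# Minimal harness for write_results_to_file(). Assumes
# test_logo_detection.py is on the import path; the Namespace fields
# mirror the args attributes the function reads.
from pathlib import Path
from types import SimpleNamespace

from test_logo_detection import write_results_to_file

args = SimpleNamespace(
    matching_method="margin",
    margin=0.05,             # illustrative value
    use_max_similarity=False,
    min_matching_refs=2,     # illustrative value
    refs_per_logo=5,
    positive_samples=10,
    negative_samples=10,
    threshold=0.8,
    detr_threshold=0.9,
    seed=42,
)

write_results_to_file(
    output_path=Path("results.txt"),
    args=args,
    num_logos=20,
    total_refs=100,
    num_test_images=400,
    true_positives=180,
    false_positives=12,
    false_negatives=20,
    total_expected=200,
    precision=0.9375,   # 180 / (180 + 12)
    recall=0.9,         # 180 / 200
    f1=0.9184,          # harmonic mean of the two
)

Since the file is opened in append mode and each summary ends with two blank lines, repeated runs should stack cleanly in a single results file, which is what lets run_comparison_tests.sh collect several test configurations without tee or shell redirection.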