Add --output-file option for clean results output

- Add --output-file argument to test_logo_detection.py that appends
  only the results summary (no progress indicators) to the specified file
- Add write_results_to_file() with detailed header showing test type
  and method parameters
- Update run_comparison_tests.sh to use --output-file instead of
  tee/redirection, keeping console output separate from file output
This commit is contained in:
Rick McEwen
2025-12-31 17:42:52 -05:00
parent 41bc0c701f
commit 41c75356d9
2 changed files with 112 additions and 14 deletions

View File

@ -16,10 +16,19 @@ MIN_MATCHING_REFS=3
# Use a fixed seed for reproducibility across methods # Use a fixed seed for reproducibility across methods
SEED=42 SEED=42
# Clear output file and write header
echo "Logo Detection Comparison Tests" > "$OUTPUT_FILE" echo "Logo Detection Comparison Tests" > "$OUTPUT_FILE"
echo "================================" >> "$OUTPUT_FILE" echo "================================" >> "$OUTPUT_FILE"
echo "Date: $(date)" >> "$OUTPUT_FILE" echo "Date: $(date)" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE" echo "" >> "$OUTPUT_FILE"
echo "Common Parameters:" >> "$OUTPUT_FILE"
echo " Reference logos: $NUM_LOGOS" >> "$OUTPUT_FILE"
echo " Refs per logo: $REFS_PER_LOGO" >> "$OUTPUT_FILE"
echo " Positive samples: $POSITIVE_SAMPLES" >> "$OUTPUT_FILE"
echo " Negative samples: $NEGATIVE_SAMPLES" >> "$OUTPUT_FILE"
echo " Min matching refs: $MIN_MATCHING_REFS" >> "$OUTPUT_FILE"
echo " Seed: $SEED" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE"
echo "Running tests with:" echo "Running tests with:"
echo " Reference logos: $NUM_LOGOS" echo " Reference logos: $NUM_LOGOS"
@ -31,7 +40,7 @@ echo " Seed: $SEED"
echo "" echo ""
# Test 1: Simple matching (baseline - all matches above threshold) # Test 1: Simple matching (baseline - all matches above threshold)
echo "=== Test 1: Simple matching (baseline) ===" | tee -a "$OUTPUT_FILE" echo "=== Test 1: Simple matching (baseline) ==="
uv run python "$SCRIPT_DIR/test_logo_detection.py" \ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--num-logos $NUM_LOGOS \ --num-logos $NUM_LOGOS \
--refs-per-logo $REFS_PER_LOGO \ --refs-per-logo $REFS_PER_LOGO \
@ -39,13 +48,12 @@ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--negative-samples $NEGATIVE_SAMPLES \ --negative-samples $NEGATIVE_SAMPLES \
--matching-method simple \ --matching-method simple \
--seed $SEED \ --seed $SEED \
2>&1 | tee -a "$OUTPUT_FILE" --output-file "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE" echo ""
echo "" >> "$OUTPUT_FILE"
# Test 2: Margin-based matching # Test 2: Margin-based matching
echo "=== Test 2: Margin-based matching ===" | tee -a "$OUTPUT_FILE" echo "=== Test 2: Margin-based matching ==="
uv run python "$SCRIPT_DIR/test_logo_detection.py" \ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--num-logos $NUM_LOGOS \ --num-logos $NUM_LOGOS \
--refs-per-logo $REFS_PER_LOGO \ --refs-per-logo $REFS_PER_LOGO \
@ -53,13 +61,12 @@ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--negative-samples $NEGATIVE_SAMPLES \ --negative-samples $NEGATIVE_SAMPLES \
--matching-method margin \ --matching-method margin \
--seed $SEED \ --seed $SEED \
2>&1 | tee -a "$OUTPUT_FILE" --output-file "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE" echo ""
echo "" >> "$OUTPUT_FILE"
# Test 3: Multi-ref with mean similarity # Test 3: Multi-ref with mean similarity
echo "=== Test 3: Multi-ref matching (mean similarity) ===" | tee -a "$OUTPUT_FILE" echo "=== Test 3: Multi-ref matching (mean similarity) ==="
uv run python "$SCRIPT_DIR/test_logo_detection.py" \ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--num-logos $NUM_LOGOS \ --num-logos $NUM_LOGOS \
--refs-per-logo $REFS_PER_LOGO \ --refs-per-logo $REFS_PER_LOGO \
@ -68,13 +75,12 @@ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--matching-method multi-ref \ --matching-method multi-ref \
--min-matching-refs $MIN_MATCHING_REFS \ --min-matching-refs $MIN_MATCHING_REFS \
--seed $SEED \ --seed $SEED \
2>&1 | tee -a "$OUTPUT_FILE" --output-file "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE" echo ""
echo "" >> "$OUTPUT_FILE"
# Test 4: Multi-ref with max similarity # Test 4: Multi-ref with max similarity
echo "=== Test 4: Multi-ref matching (max similarity) ===" | tee -a "$OUTPUT_FILE" echo "=== Test 4: Multi-ref matching (max similarity) ==="
uv run python "$SCRIPT_DIR/test_logo_detection.py" \ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--num-logos $NUM_LOGOS \ --num-logos $NUM_LOGOS \
--refs-per-logo $REFS_PER_LOGO \ --refs-per-logo $REFS_PER_LOGO \
@ -84,7 +90,7 @@ uv run python "$SCRIPT_DIR/test_logo_detection.py" \
--min-matching-refs $MIN_MATCHING_REFS \ --min-matching-refs $MIN_MATCHING_REFS \
--use-max-similarity \ --use-max-similarity \
--seed $SEED \ --seed $SEED \
2>&1 | tee -a "$OUTPUT_FILE" --output-file "$OUTPUT_FILE"
echo "" echo ""
echo "Results saved to: $OUTPUT_FILE" echo "Results saved to: $OUTPUT_FILE"

View File

@ -268,6 +268,12 @@ def main():
action="store_true", action="store_true",
help="Clear embedding cache before running", help="Clear embedding cache before running",
) )
parser.add_argument(
"--output-file",
type=str,
default=None,
help="Append results summary to this file (no progress output, just results)",
)
args = parser.parse_args() args = parser.parse_args()
logger = setup_logging(args.verbose) logger = setup_logging(args.verbose)
@ -579,6 +585,92 @@ def main():
print("=" * 60) print("=" * 60)
# Write results to file if requested
if args.output_file:
write_results_to_file(
output_path=Path(args.output_file),
args=args,
num_logos=len(sampled_logos),
total_refs=total_refs,
num_test_images=len(test_images),
true_positives=true_positives,
false_positives=false_positives,
false_negatives=false_negatives,
total_expected=total_expected,
precision=precision,
recall=recall,
f1=f1,
)
print(f"\nResults appended to: {args.output_file}")
def write_results_to_file(
    output_path: Path,
    args,
    num_logos: int,
    total_refs: int,
    num_test_images: int,
    true_positives: int,
    false_positives: int,
    false_negatives: int,
    total_expected: int,
    precision: float,
    recall: float,
    f1: float,
):
    """Append a results summary (with a detailed header) to *output_path*.

    Opens the file in append mode so repeated test runs accumulate their
    summaries in one file; the caller is responsible for truncating it.

    Args:
        output_path: File to append the summary to (created if missing).
        args: Parsed CLI namespace; reads ``matching_method``, ``margin``,
            ``use_max_similarity``, ``min_matching_refs``, ``refs_per_logo``,
            ``positive_samples``, ``negative_samples``, ``threshold``,
            ``detr_threshold`` and ``seed``.
        num_logos: Number of reference logos actually sampled.
        total_refs: Total reference embeddings across all logos.
        num_test_images: Number of test images processed.
        true_positives: Count of correct detections.
        false_positives: Count of spurious detections.
        false_negatives: Count of missed detections.
        total_expected: Total detections expected.
        precision: Precision score in [0, 1].
        recall: Recall score in [0, 1].
        f1: F1 score in [0, 1].
    """
    # Local import keeps the module's top-level import cost unchanged.
    from datetime import datetime

    # Build a human-readable method description for the header.
    if args.matching_method == "simple":
        method_desc = "Simple (all matches above threshold)"
    elif args.matching_method == "margin":
        method_desc = f"Margin-based (margin={args.margin})"
    else:  # multi-ref
        agg = "max" if args.use_max_similarity else "mean"
        method_desc = f"Multi-ref ({agg}, min_refs={args.min_matching_refs}, margin={args.margin})"

    lines = [
        "=" * 70,
        f"TEST: {args.matching_method.upper()} MATCHING",
        f"Method: {method_desc}",
        "=" * 70,
        f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",
        "Configuration:",
        f"  Reference logos: {num_logos}",
        f"  Refs per logo: {args.refs_per_logo}",
        # BUG FIX: original f-string read "embeddings:{total_refs}" with no
        # space after the colon, unlike every sibling line.
        f"  Total reference embeddings: {total_refs}",
        f"  Positive samples/logo: {args.positive_samples}",
        f"  Negative samples/logo: {args.negative_samples}",
        f"  Test images processed: {num_test_images}",
        f"  CLIP threshold: {args.threshold}",
        f"  DETR threshold: {args.detr_threshold}",
    ]
    if args.seed is not None:
        lines.append(f"  Random seed: {args.seed}")
    lines.extend([
        "",
        "Results:",
        f"  True Positives: {true_positives:>6}",
        f"  False Positives: {false_positives:>6}",
        f"  False Negatives: {false_negatives:>6}",
        f"  Total Expected: {total_expected:>6}",
        "",
        "Scores:",
        f"  Precision: {precision:.4f} ({precision*100:.1f}%)",
        f"  Recall: {recall:.4f} ({recall*100:.1f}%)",
        f"  F1 Score: {f1:.4f} ({f1*100:.1f}%)",
        "",
        "",
    ])

    # Append (not overwrite) so successive test runs stack in one file.
    with open(output_path, "a") as f:
        f.write("\n".join(lines))
if __name__ == "__main__": if __name__ == "__main__":
main() main()