Add embedding model selection and comparison test scripts
- Update DetectLogosDETR to support both CLIP and DINOv2 models
- Rename clip_model parameter to embedding_model
- Add model type detection for different embedding extraction
- DINOv2 uses CLS token, CLIP uses get_image_features()
- Add -e/--embedding-model argument to test_logo_detection.py
- Include model name in file output header
- Add run_threshold_tests.sh for testing various threshold/margin values
- Add run_model_comparison.sh for comparing CLIP vs DINOv2 models
This commit is contained in:
92
run_model_comparison.sh
Executable file
92
run_model_comparison.sh
Executable file
@ -0,0 +1,92 @@
|
||||
#!/bin/bash
#
# Compare different embedding models for logo detection.
# Tests CLIP vs DINOv2 models.
#
# Usage: ./run_model_comparison.sh
# Results are appended to model_comparison_results.txt next to this script.
#
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
readonly OUTPUT_FILE="${SCRIPT_DIR}/model_comparison_results.txt"

# Common parameters shared by every model under test.
readonly NUM_LOGOS=20
readonly REFS_PER_LOGO=10
readonly POSITIVE_SAMPLES=20
readonly NEGATIVE_SAMPLES=100
readonly MIN_MATCHING_REFS=3
readonly THRESHOLD=0.80
readonly MARGIN=0.10
readonly SEED=42

#######################################
# Run one logo-detection test with the given embedding model.
# Globals:   SCRIPT_DIR, OUTPUT_FILE, and the common parameters above (read)
# Arguments: $1 - HuggingFace model id (e.g. "openai/clip-vit-large-patch14")
# Outputs:   test results appended to $OUTPUT_FILE by the python script
#######################################
run_model_test() {
  local model=$1
  uv run python "$SCRIPT_DIR/test_logo_detection.py" \
    --num-logos "$NUM_LOGOS" \
    --refs-per-logo "$REFS_PER_LOGO" \
    --positive-samples "$POSITIVE_SAMPLES" \
    --negative-samples "$NEGATIVE_SAMPLES" \
    --matching-method multi-ref \
    --min-matching-refs "$MIN_MATCHING_REFS" \
    --use-max-similarity \
    --threshold "$THRESHOLD" \
    --margin "$MARGIN" \
    --seed "$SEED" \
    --embedding-model "$model" \
    --clear-cache \
    --output-file "$OUTPUT_FILE"
}

# Clear output file and write the header in a single grouped redirection.
{
  echo "Embedding Model Comparison Tests"
  echo "================================="
  echo "Date: $(date)"
  echo ""
  echo "Common Parameters:"
  echo "  Matching method: multi-ref (max)"
  echo "  Reference logos: $NUM_LOGOS"
  echo "  Refs per logo: $REFS_PER_LOGO"
  echo "  Positive samples: $POSITIVE_SAMPLES"
  echo "  Negative samples: $NEGATIVE_SAMPLES"
  echo "  Min matching refs: $MIN_MATCHING_REFS"
  echo "  Threshold: $THRESHOLD"
  echo "  Margin: $MARGIN"
  echo "  Seed: $SEED"
  echo ""
} > "$OUTPUT_FILE"

echo "Running model comparison tests..."
echo "  Matching method: multi-ref (max)"
echo "  Reference logos: $NUM_LOGOS"
echo "  Threshold: $THRESHOLD"
echo "  Margin: $MARGIN"
echo "  Seed: $SEED"
echo ""

# IMPORTANT: Clear cache between model tests since embeddings are model-specific
# (each run passes --clear-cache to guarantee this).
echo "NOTE: Cache will be cleared between model tests to ensure correct embeddings."
echo ""

# Test 1: CLIP ViT-Large (default)
echo "=== Test 1: CLIP ViT-Large (openai/clip-vit-large-patch14) ==="
run_model_test "openai/clip-vit-large-patch14"

echo ""

# Test 2: DINOv2 Small
echo "=== Test 2: DINOv2 Small (facebook/dinov2-small) ==="
run_model_test "facebook/dinov2-small"

echo ""
echo "Results saved to: $OUTPUT_FILE"
echo ""
echo "Note: You can also try other models:"
echo "  - facebook/dinov2-base"
echo "  - facebook/dinov2-large"
echo "  - openai/clip-vit-base-patch32"
echo "  - openai/clip-vit-large-patch14-336"
|
||||
Reference in New Issue
Block a user