# Training configuration optimized for cloud RTX 4090 / RTX 3090 (24GB VRAM)
#
# Usage:
#   python train_clip_logo.py --config configs/cloud_rtx4090.yaml
#
# Estimated training time: 4-6 hours
# Estimated cost on RunPod: ~$3

# Base model
base_model: "openai/clip-vit-large-patch14"

# Dataset paths
dataset_dir: "LogoDet-3K"
reference_dir: "reference_logos"
db_path: "test_data_mapping.db"

# Data splits
train_split: 0.7
val_split: 0.15
test_split: 0.15

# Larger batches for faster training on 24GB VRAM
batch_size: 32
logos_per_batch: 32
samples_per_logo: 4
gradient_accumulation_steps: 4  # Effective batch = 128
num_workers: 8

# Model architecture
lora_r: 16
lora_alpha: 32
lora_dropout: 0.1
freeze_layers: 12
use_gradient_checkpointing: true

# Training
learning_rate: 1.0e-5
weight_decay: 0.01
warmup_steps: 500
max_epochs: 20
mixed_precision: true

# Loss
temperature: 0.07
loss_type: "infonce"
triplet_margin: 0.3

# Early stopping
patience: 5
min_delta: 0.001

# Output
checkpoint_dir: "checkpoints"
output_dir: "models/logo_detection/clip_finetuned"
save_every_n_epochs: 2  # Save more frequently for cloud

# Logging
log_every_n_steps: 10
eval_every_n_epochs: 1

seed: 42
use_hard_negatives: false
use_augmentation: true
augmentation_strength: "medium"
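
# ---------------------------------------------------------------------------
# Minimal loading sketch (assumption: train_clip_logo.py reads this file with
# PyYAML; adapt to that script's actual argument handling). Kept as comments
# so this file remains valid YAML.
#
#   import yaml
#
#   with open("configs/cloud_rtx4090.yaml") as f:
#       config = yaml.safe_load(f)
#
#   # Effective batch size = per-step batch x accumulation steps (32 * 4 = 128)
#   effective_batch = config["batch_size"] * config["gradient_accumulation_steps"]
#   print(f"Effective batch size: {effective_batch}")
# ---------------------------------------------------------------------------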