/// State manager for target analysis (ChangeNotifier).
///
/// Drives the complete analysis workflow: image loading, target detection,
/// impact management (manual and automatic), score computation,
/// grouping analysis and session persistence.
library;

import 'dart:async';
import 'dart:io';
import 'dart:ui' as ui;

import 'package:flutter/foundation.dart';
import 'package:uuid/uuid.dart';

import '../../data/models/session.dart';
import '../../data/models/shot.dart';
import '../../data/models/target_type.dart';
import '../../data/repositories/session_repository.dart';
import '../../services/distortion_correction_service.dart';
import '../../services/grouping_analyzer_service.dart';
import '../../services/opencv_target_service.dart';
import '../../services/score_calculator_service.dart';
import '../../services/target_detection_service.dart';

/// Lifecycle states of the analysis workflow.
enum AnalysisState { initial, loading, success, error }

/// Provider exposing the target-analysis state to the UI.
///
/// All mutating operations end with [notifyListeners] so widgets rebuild.
class AnalysisProvider extends ChangeNotifier {
  final TargetDetectionService _detectionService;
  final ScoreCalculatorService _scoreCalculatorService;
  final GroupingAnalyzerService _groupingAnalyzerService;
  final SessionRepository _sessionRepository;
  final DistortionCorrectionService _distortionService;
  final OpenCVTargetService _opencvTargetService;
  final Uuid _uuid = const Uuid();

  AnalysisProvider({
    required TargetDetectionService detectionService,
    required ScoreCalculatorService scoreCalculatorService,
    required GroupingAnalyzerService groupingAnalyzerService,
    required SessionRepository sessionRepository,
    DistortionCorrectionService? distortionService,
    OpenCVTargetService? opencvTargetService,
  })  : _detectionService = detectionService,
        _scoreCalculatorService = scoreCalculatorService,
        _groupingAnalyzerService = groupingAnalyzerService,
        _sessionRepository = sessionRepository,
        _distortionService = distortionService ?? DistortionCorrectionService(),
        _opencvTargetService = opencvTargetService ?? OpenCVTargetService();

  AnalysisState _state = AnalysisState.initial;
  String? _errorMessage;
  String? _imagePath;
  TargetType? _targetType;

  // Target detection results (normalized coordinates in [0, 1]).
  double _targetCenterX = 0.5;
  double _targetCenterY = 0.5;
  double _targetRadius = 0.4;
  double _targetInnerRadius = 0.04;
  int _ringCount = 10;
  List<double>? _ringRadii; // Individual ring radii multipliers
  double _imageAspectRatio = 1.0; // width / height

  // Shots currently placed on the target.
  List<Shot> _shots = [];

  // Score results
  ScoreResult? _scoreResult;

  // Grouping results
  GroupingResult? _groupingResult;

  // Reference-based detection
  List<Shot> _referenceImpacts = [];
  ImpactCharacteristics? _learnedCharacteristics;

  // Distortion correction
  bool _distortionCorrectionEnabled = false;
  DistortionParameters? _distortionParams;
  String? _correctedImagePath;

  // Getters
  AnalysisState get state => _state;
  String? get errorMessage => _errorMessage;
  String? get imagePath => _imagePath;
  TargetType? get targetType => _targetType;
  double get targetCenterX => _targetCenterX;
  double get targetCenterY => _targetCenterY;
  double get targetRadius => _targetRadius;
  double get targetInnerRadius => _targetInnerRadius;
  int get ringCount => _ringCount;
  List<double>? get ringRadii =>
      _ringRadii != null ? List.unmodifiable(_ringRadii!) : null;
  double get imageAspectRatio => _imageAspectRatio;
  List<Shot> get shots => List.unmodifiable(_shots);
  ScoreResult? get scoreResult => _scoreResult;
  GroupingResult? get groupingResult => _groupingResult;
  int get totalScore => _scoreResult?.totalScore ?? 0;
  int get shotCount => _shots.length;
  List<Shot> get referenceImpacts => List.unmodifiable(_referenceImpacts);
  ImpactCharacteristics? get learnedCharacteristics => _learnedCharacteristics;
  bool get hasLearnedCharacteristics => _learnedCharacteristics != null;

  // Distortion correction getters
  bool get distortionCorrectionEnabled => _distortionCorrectionEnabled;
  DistortionParameters? get distortionParams => _distortionParams;
  String? get correctedImagePath => _correctedImagePath;
  bool get hasDistortion => _distortionParams?.needsCorrection ?? false;

  /// The image path to display (corrected if enabled, original otherwise).
  String? get displayImagePath =>
      _distortionCorrectionEnabled && _correctedImagePath != null
          ? _correctedImagePath
          : _imagePath;

  /// Analyzes an image.
  ///
  /// [autoAnalyze] determines if we should run automatic detection
  /// immediately. If false, only the image is loaded and default target
  /// parameters are set.
  Future<void> analyzeImage(
    String imagePath,
    TargetType targetType, {
    bool autoAnalyze = true,
  }) async {
    _state = AnalysisState.loading;
    _imagePath = imagePath;
    _targetType = targetType;
    _errorMessage = null;
    notifyListeners();

    try {
      // Load the image once just to measure its dimensions.
      final file = File(imagePath);
      final bytes = await file.readAsBytes();
      final codec = await ui.instantiateImageCodec(bytes);
      final frame = await codec.getNextFrame();
      _imageAspectRatio = frame.image.width / frame.image.height;
      frame.image.dispose();
      codec.dispose(); // Release the decoder; we only needed one frame.

      if (!autoAnalyze) {
        // Just set up default values without running detection.
        _targetCenterX = 0.5;
        _targetCenterY = 0.5;
        _targetRadius = 0.4;
        _targetInnerRadius = 0.04;
        // Initialize empty shots list.
        _shots = [];
        _state = AnalysisState.success;
        notifyListeners();
        return;
      }

      // Detect target and impacts.
      final result = _detectionService.detectTarget(imagePath, targetType);
      if (!result.success) {
        _state = AnalysisState.error;
        _errorMessage = result.errorMessage;
        notifyListeners();
        return;
      }

      _targetCenterX = result.centerX;
      _targetCenterY = result.centerY;
      _targetRadius = result.radius;
      _targetInnerRadius = result.radius * 0.1;

      // Create shots from detected impacts.
      _shots = result.impacts.map((impact) {
        return Shot(
          id: _uuid.v4(),
          x: impact.x,
          y: impact.y,
          score: impact.suggestedScore,
          sessionId: '', // Will be set when saving.
        );
      }).toList();

      // Calculate scores.
      _recalculateScores();

      // Calculate grouping.
      _recalculateGrouping();

      _state = AnalysisState.success;
      notifyListeners();
    } catch (e) {
      _state = AnalysisState.error;
      _errorMessage = 'Erreur d\'analyse: $e';
      notifyListeners();
    }
  }

  /// Adds a manual shot at normalized position ([x], [y]).
  void addShot(double x, double y) {
    final score = _calculateShotScore(x, y);
    final shot = Shot(id: _uuid.v4(), x: x, y: y, score: score, sessionId: '');
    _shots.add(shot);
    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();
  }

  /// Removes the shot identified by [shotId] (no-op if absent).
  void removeShot(String shotId) {
    _shots.removeWhere((shot) => shot.id == shotId);
    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();
  }

  /// Moves a shot to a new position and rescores it.
  void moveShot(String shotId, double newX, double newY) {
    final index = _shots.indexWhere((shot) => shot.id == shotId);
    if (index == -1) return;

    final newScore = _calculateShotScore(newX, newY);
    _shots[index] = _shots[index].copyWith(x: newX, y: newY, score: newScore);
    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();
  }

  /// Auto-detects impacts using image processing.
  ///
  /// Returns the number of impacts detected. When [clearExisting] is true the
  /// current shots are discarded before the detected ones are added.
  Future<int> autoDetectImpacts({
    int darkThreshold = 80,
    int minImpactSize = 20,
    int maxImpactSize = 500,
    double minCircularity = 0.6,
    double minFillRatio = 0.5,
    bool clearExisting = false,
  }) async {
    if (_imagePath == null || _targetType == null) return 0;

    final settings = ImpactDetectionSettings(
      darkThreshold: darkThreshold,
      minImpactSize: minImpactSize,
      maxImpactSize: maxImpactSize,
      minCircularity: minCircularity,
      minFillRatio: minFillRatio,
    );

    final detectedImpacts = _detectionService.detectImpactsOnly(
      _imagePath!,
      _targetType!,
      _targetCenterX,
      _targetCenterY,
      _targetRadius,
      _ringCount,
      settings,
    );

    if (clearExisting) {
      _shots.clear();
    }

    // Add detected impacts as shots.
    for (final impact in detectedImpacts) {
      final score = _calculateShotScore(impact.x, impact.y);
      final shot = Shot(
        id: _uuid.v4(),
        x: impact.x,
        y: impact.y,
        score: score,
        sessionId: '',
      );
      _shots.add(shot);
    }

    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();

    return detectedImpacts.length;
  }

  /// Auto-detects impacts using OpenCV (Hough Circles + Contours).
  ///
  /// NOTE: OpenCV is currently disabled on Windows because of build issues.
  /// There this method returns 0 (no impacts detected); use
  /// [autoDetectImpacts] instead.
  ///
  /// Uses OpenCV algorithms for a more robust detection:
  /// - Hough transform to detect circles
  /// - Contour analysis with circularity filtering
  Future<int> autoDetectImpactsWithOpenCV({
    double cannyThreshold1 = 50,
    double cannyThreshold2 = 150,
    double minDist = 20,
    double param1 = 100,
    double param2 = 30,
    int minRadius = 5,
    int maxRadius = 50,
    int blurSize = 5,
    bool useContourDetection = true,
    double minCircularity = 0.6,
    double minContourArea = 50,
    double maxContourArea = 5000,
    bool clearExisting = false,
  }) async {
    if (_imagePath == null || _targetType == null) return 0;

    final settings = OpenCVDetectionSettings(
      cannyThreshold1: cannyThreshold1,
      cannyThreshold2: cannyThreshold2,
      minDist: minDist,
      param1: param1,
      param2: param2,
      minRadius: minRadius,
      maxRadius: maxRadius,
      blurSize: blurSize,
      useContourDetection: useContourDetection,
      minCircularity: minCircularity,
      minContourArea: minContourArea,
      maxContourArea: maxContourArea,
    );

    final detectedImpacts = _detectionService.detectImpactsWithOpenCV(
      _imagePath!,
      _targetType!,
      _targetCenterX,
      _targetCenterY,
      _targetRadius,
      _ringCount,
      settings: settings,
    );

    if (clearExisting) {
      _shots.clear();
    }

    // Add detected impacts as shots.
    for (final impact in detectedImpacts) {
      final score = _calculateShotScore(impact.x, impact.y);
      final shot = Shot(
        id: _uuid.v4(),
        x: impact.x,
        y: impact.y,
        score: score,
        sessionId: '',
      );
      _shots.add(shot);
    }

    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();

    return detectedImpacts.length;
  }

  /// Detects impacts with OpenCV using reference points.
  ///
  /// Requires at least two reference impacts; returns the number detected.
  Future<int> detectFromReferencesWithOpenCV({
    double tolerance = 2.0,
    bool clearExisting = false,
  }) async {
    if (_imagePath == null ||
        _targetType == null ||
        _referenceImpacts.length < 2) {
      return 0;
    }

    // Convert the reference shots to detection-service inputs.
    final references = _referenceImpacts
        .map((shot) => ReferenceImpact(x: shot.x, y: shot.y))
        .toList();

    final detectedImpacts =
        _detectionService.detectImpactsWithOpenCVFromReferences(
      _imagePath!,
      _targetType!,
      _targetCenterX,
      _targetCenterY,
      _targetRadius,
      _ringCount,
      references,
      tolerance: tolerance,
    );

    if (clearExisting) {
      _shots.clear();
    }

    // Add detected impacts as shots.
    for (final impact in detectedImpacts) {
      final score = _calculateShotScore(impact.x, impact.y);
      final shot = Shot(
        id: _uuid.v4(),
        x: impact.x,
        y: impact.y,
        score: score,
        sessionId: '',
      );
      _shots.add(shot);
    }

    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();

    return detectedImpacts.length;
  }

  /// Adds a reference impact for calibrated detection.
  void addReferenceImpact(double x, double y) {
    final score = _calculateShotScore(x, y);
    final shot = Shot(id: _uuid.v4(), x: x, y: y, score: score, sessionId: '');
    _referenceImpacts.add(shot);
    notifyListeners();
  }

  /// Removes a reference impact and invalidates learned characteristics.
  void removeReferenceImpact(String shotId) {
    _referenceImpacts.removeWhere((shot) => shot.id == shotId);
    _learnedCharacteristics = null;
    notifyListeners();
  }

  /// Clears all reference impacts and learned characteristics.
  void clearReferenceImpacts() {
    _referenceImpacts.clear();
    _learnedCharacteristics = null;
    notifyListeners();
  }

  /// Learns impact characteristics from the reference impacts.
  ///
  /// Returns true when learning succeeded (requires at least 2 references).
  bool learnFromReferences() {
    if (_imagePath == null || _referenceImpacts.length < 2) return false;

    final references = _referenceImpacts
        .map((shot) => ReferenceImpact(x: shot.x, y: shot.y))
        .toList();

    _learnedCharacteristics = _detectionService.analyzeReferenceImpacts(
      _imagePath!,
      references,
    );

    notifyListeners();
    return _learnedCharacteristics != null;
  }

  /// Auto-detects impacts using learned reference characteristics.
  ///
  /// Returns the number of impacts detected, or 0 when prerequisites
  /// (image, target type, learned characteristics) are missing.
  Future<int> detectFromReferences({
    double tolerance = 2.0,
    bool clearExisting = false,
  }) async {
    if (_imagePath == null ||
        _targetType == null ||
        _learnedCharacteristics == null) {
      return 0;
    }

    final detectedImpacts = _detectionService.detectImpactsFromReferences(
      _imagePath!,
      _targetType!,
      _targetCenterX,
      _targetCenterY,
      _targetRadius,
      _ringCount,
      _learnedCharacteristics!,
      tolerance: tolerance,
    );

    if (clearExisting) {
      _shots.clear();
    }

    // Add detected impacts as shots.
    for (final impact in detectedImpacts) {
      final score = _calculateShotScore(impact.x, impact.y);
      final shot = Shot(
        id: _uuid.v4(),
        x: impact.x,
        y: impact.y,
        score: score,
        sessionId: '',
      );
      _shots.add(shot);
    }

    _recalculateScores();
    _recalculateGrouping();
    notifyListeners();

    return detectedImpacts.length;
  }

  /// Adjusts the target position and rescores every shot accordingly.
  void adjustTargetPosition(
    double centerX,
    double centerY,
    double innerRadius,
    double radius, {
    int? ringCount,
    List<double>? ringRadii,
  }) {
    _targetCenterX = centerX;
    _targetCenterY = centerY;
    _targetInnerRadius = innerRadius;
    _targetRadius = radius;
    if (ringCount != null) {
      _ringCount = ringCount;
    }
    if (ringRadii != null) {
      _ringRadii = ringRadii;
    }

    // Recalculate all shot scores based on the new target position.
    _shots = _shots.map((shot) {
      final newScore = _calculateShotScore(shot.x, shot.y);
      return shot.copyWith(score: newScore);
    }).toList();

    _recalculateScores();
    notifyListeners();
  }

  /// Auto-calibrates the target using OpenCV.
  ///
  /// Returns true on success; detection failures and exceptions yield false.
  Future<bool> autoCalibrateTarget() async {
    if (_imagePath == null) return false;

    try {
      final result = await _opencvTargetService.detectTarget(_imagePath!);
      if (result.success) {
        adjustTargetPosition(
          result.centerX,
          result.centerY,
          result.radius * 0.1, // Inner radius defaults to 10% of the radius.
          result.radius,
        );
        return true;
      }
      return false;
    } catch (e) {
      debugPrint('Auto-calibration error: $e');
      return false;
    }
  }

  /// Computes distortion parameters from the current calibration.
  void calculateDistortion() {
    _distortionParams = _distortionService.calculateDistortionFromCalibration(
      targetCenterX: _targetCenterX,
      targetCenterY: _targetCenterY,
      targetRadius: _targetRadius,
      imageAspectRatio: _imageAspectRatio,
    );
    notifyListeners();
  }

  /// Applies distortion correction to the image.
  ///
  /// Creates a new corrected image file and enables its display. Errors are
  /// reported via [errorMessage] rather than thrown.
  Future<void> applyDistortionCorrection() async {
    if (_imagePath == null || _distortionParams == null) return;

    try {
      _correctedImagePath = await _distortionService.applyCorrection(
        _imagePath!,
        _distortionParams!,
      );
      _distortionCorrectionEnabled = true;
      notifyListeners();
    } catch (e) {
      _errorMessage = 'Erreur lors de la correction: $e';
      notifyListeners();
    }
  }

  /// Enables or disables the display of the corrected image.
  void setDistortionCorrectionEnabled(bool enabled) {
    if (enabled && _correctedImagePath == null && _distortionParams != null) {
      // Enabling without a corrected image yet: create it (fire-and-forget;
      // applyDistortionCorrection notifies and flips the flag itself).
      unawaited(applyDistortionCorrection());
    } else {
      _distortionCorrectionEnabled = enabled;
      notifyListeners();
    }
  }

  /* version deux a tester */

  /// Computes AND applies the correction for immediate feedback.
  Future<void> calculateAndApplyDistortion() async {
    // 1. Compute the parameters.
    _distortionParams = _distortionService.calculateDistortionFromCalibration(
      targetCenterX: _targetCenterX,
      targetCenterY: _targetCenterY,
      targetRadius: _targetRadius,
      imageAspectRatio: _imageAspectRatio,
    );

    // 2. Check whether a correction is actually needed.
    if (_distortionParams != null && _distortionParams!.needsCorrection) {
      // 3. Apply the transformation immediately (asynchronous).
      await applyDistortionCorrection();
    } else {
      notifyListeners(); // Still notify even when no correction is needed.
    }
  }

  /// Runs the full distortion workflow: compute, correct, enable display.
  ///
  /// Sets [state] to loading while working so the UI can show a spinner.
  /// NOTE(review): [applyDistortionCorrection] swallows its own errors into
  /// [errorMessage], so the catch below only sees failures thrown elsewhere.
  Future<void> runFullDistortionWorkflow() async {
    _state = AnalysisState.loading; // Shows a spinner in the UI.
    notifyListeners();

    try {
      calculateDistortion(); // Compute the parameters.
      await applyDistortionCorrection(); // Generate the corrected file.
      _distortionCorrectionEnabled = true; // Enable the display.
      _state = AnalysisState.success;
    } catch (e) {
      _errorMessage = "Erreur de rendu : $e";
      _state = AnalysisState.error;
    } finally {
      notifyListeners();
    }
  }

  /* fin section deux a tester */

  /// Computes the score of a single shot from its normalized position.
  int _calculateShotScore(double x, double y) {
    if (_targetType == TargetType.concentric) {
      return _scoreCalculatorService.calculateConcentricScore(
        shotX: x,
        shotY: y,
        targetCenterX: _targetCenterX,
        targetCenterY: _targetCenterY,
        targetRadius: _targetRadius,
        ringCount: _ringCount,
        imageAspectRatio: _imageAspectRatio,
        ringRadii: _ringRadii,
      );
    } else {
      return _scoreCalculatorService.calculateSilhouetteScore(
        shotX: x,
        shotY: y,
        targetCenterX: _targetCenterX,
        targetCenterY: _targetCenterY,
        targetWidth: _targetRadius * 0.8,
        targetHeight: _targetRadius * 2,
      );
    }
  }

  /// Recomputes the aggregate score result for all shots.
  void _recalculateScores() {
    if (_targetType == null) return;

    _scoreResult = _scoreCalculatorService.calculateScores(
      shots: _shots,
      targetType: _targetType!,
      targetCenterX: _targetCenterX,
      targetCenterY: _targetCenterY,
      targetRadius: _targetRadius,
      ringCount: _ringCount,
      imageAspectRatio: _imageAspectRatio,
      ringRadii: _ringRadii,
    );
  }

  /// Recomputes the grouping analysis for all shots.
  void _recalculateGrouping() {
    _groupingResult = _groupingAnalyzerService.analyzeGrouping(_shots);
  }

  /// Saves the current analysis as a session.
  ///
  /// Throws when no image or target type is loaded. On success the local
  /// shots are replaced by the persisted ones (with their session IDs set).
  Future<Session> saveSession({String? notes}) async {
    if (_imagePath == null || _targetType == null) {
      throw Exception('Cannot save: missing image or target type');
    }

    final session = await _sessionRepository.createSession(
      targetType: _targetType!,
      imagePath: _imagePath!,
      shots: _shots.map((s) => s.copyWith(sessionId: '')).toList(),
      totalScore: totalScore,
      groupingDiameter: _groupingResult?.diameter,
      groupingCenterX: _groupingResult?.centerX,
      groupingCenterY: _groupingResult?.centerY,
      notes: notes,
      targetCenterX: _targetCenterX,
      targetCenterY: _targetCenterY,
      targetRadius: _targetRadius,
    );

    // Update shots with the session ID assigned by the repository.
    _shots = session.shots;
    notifyListeners();

    return session;
  }

  /// Resets the provider to its initial state.
  void reset() {
    _state = AnalysisState.initial;
    _errorMessage = null;
    _imagePath = null;
    _targetType = null;
    _targetCenterX = 0.5;
    _targetCenterY = 0.5;
    _targetRadius = 0.4;
    _targetInnerRadius = 0.04;
    _ringCount = 10;
    _ringRadii = null;
    _imageAspectRatio = 1.0;
    _shots = [];
    _scoreResult = null;
    _groupingResult = null;
    _referenceImpacts = [];
    _learnedCharacteristics = null;
    _distortionCorrectionEnabled = false;
    _distortionParams = null;
    _correctedImagePath = null;
    notifyListeners();
  }
}