test opencv échoué

This commit is contained in:
2026-01-27 22:20:53 +01:00
parent f1a8eefdc3
commit 334332bc78
9 changed files with 827 additions and 105 deletions

7
.claude/settings.json Normal file
View File

@@ -0,0 +1,7 @@
{
"permissions": {
"allow": [
"Bash(flutter analyze:*)"
]
}
}

View File

@@ -3,7 +3,14 @@
"allow": [
"Bash(flutter clean:*)",
"Bash(flutter pub get:*)",
"Bash(flutter run:*)"
"Bash(flutter run:*)",
"Bash(cmake:*)",
"Bash(where:*)",
"Bash(winget search:*)",
"Bash(winget install:*)",
"Bash(\"/c/Program Files \\(x86\\)/Microsoft Visual Studio/Installer/vs_installer.exe\" modify --installPath \"C:\\\\Program Files \\(x86\\)\\\\Microsoft Visual Studio\\\\2022\\\\BuildTools\" --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 --add Microsoft.VisualStudio.Component.Windows11SDK.22621 --passive --wait)",
"Bash(cmd //c \"\"\"C:\\\\Program Files\\\\Microsoft Visual Studio\\\\18\\\\Community\\\\Common7\\\\Tools\\\\VsDevCmd.bat\"\" && flutter run -d windows\")",
"Bash(flutter doctor:*)"
]
}
}

View File

@@ -254,6 +254,130 @@ class AnalysisProvider extends ChangeNotifier {
return detectedImpacts.length;
}
/// Auto-detects impacts using OpenCV (Hough circles + contour analysis).
///
/// NOTE: OpenCV is currently disabled on Windows because of build problems,
/// so the underlying service detects nothing and this method returns 0.
/// Prefer autoDetectImpacts() for now.
///
/// When enabled, detection relies on:
/// - the Hough transform to locate circular impacts,
/// - contour analysis filtered by circularity.
Future<int> autoDetectImpactsWithOpenCV({
  double cannyThreshold1 = 50,
  double cannyThreshold2 = 150,
  double minDist = 20,
  double param1 = 100,
  double param2 = 30,
  int minRadius = 5,
  int maxRadius = 50,
  int blurSize = 5,
  bool useContourDetection = true,
  double minCircularity = 0.6,
  double minContourArea = 50,
  double maxContourArea = 5000,
  bool clearExisting = false,
}) async {
  // Nothing to do without a loaded image and a configured target.
  if (_imagePath == null || _targetType == null) return 0;

  final detectionSettings = OpenCVDetectionSettings(
    cannyThreshold1: cannyThreshold1,
    cannyThreshold2: cannyThreshold2,
    minDist: minDist,
    param1: param1,
    param2: param2,
    minRadius: minRadius,
    maxRadius: maxRadius,
    blurSize: blurSize,
    useContourDetection: useContourDetection,
    minCircularity: minCircularity,
    minContourArea: minContourArea,
    maxContourArea: maxContourArea,
  );

  final impacts = _detectionService.detectImpactsWithOpenCV(
    _imagePath!,
    _targetType!,
    _targetCenterX,
    _targetCenterY,
    _targetRadius,
    _ringCount,
    settings: detectionSettings,
  );

  if (clearExisting) _shots.clear();

  // Register every detected impact as a scored shot.
  _shots.addAll(impacts.map((impact) => Shot(
        id: _uuid.v4(),
        x: impact.x,
        y: impact.y,
        score: _calculateShotScore(impact.x, impact.y),
        sessionId: '',
      )));

  _recalculateScores();
  _recalculateGrouping();
  notifyListeners();
  return impacts.length;
}
/// Detects impacts with OpenCV, guided by user-placed reference impacts.
///
/// Requires at least two reference impacts; returns the number of shots
/// added (0 when preconditions are not met).
Future<int> detectFromReferencesWithOpenCV({
  double tolerance = 2.0,
  bool clearExisting = false,
}) async {
  final hasEnoughReferences = _referenceImpacts.length >= 2;
  if (_imagePath == null || _targetType == null || !hasEnoughReferences) {
    return 0;
  }

  // Convert the stored reference shots into lightweight reference points.
  final referencePoints = [
    for (final shot in _referenceImpacts)
      ReferenceImpact(x: shot.x, y: shot.y),
  ];

  final impacts = _detectionService.detectImpactsWithOpenCVFromReferences(
    _imagePath!,
    _targetType!,
    _targetCenterX,
    _targetCenterY,
    _targetRadius,
    _ringCount,
    referencePoints,
    tolerance: tolerance,
  );

  if (clearExisting) _shots.clear();

  // Turn each detected impact into a scored shot.
  for (final impact in impacts) {
    _shots.add(Shot(
      id: _uuid.v4(),
      x: impact.x,
      y: impact.y,
      score: _calculateShotScore(impact.x, impact.y),
      sessionId: '',
    ));
  }

  _recalculateScores();
  _recalculateGrouping();
  notifyListeners();
  return impacts.length;
}
/// Add a reference impact for calibrated detection
void addReferenceImpact(double x, double y) {
final score = _calculateShotScore(x, y);
@@ -405,6 +529,45 @@ class AnalysisProvider extends ChangeNotifier {
}
}
/* Version deux — à tester */
/// Computes the distortion parameters AND applies them right away, so the
/// user gets immediate visual feedback.
Future<void> calculateAndApplyDistortion() async {
  // Derive the correction parameters from the current calibration.
  _distortionParams = _distortionService.calculateDistortionFromCalibration(
    targetCenterX: _targetCenterX,
    targetCenterY: _targetCenterY,
    targetRadius: _targetRadius,
    imageAspectRatio: _imageAspectRatio,
  );

  final params = _distortionParams;
  if (params == null || !params.needsCorrection) {
    // No correction needed, but listeners still have to refresh.
    notifyListeners();
    return;
  }

  // Apply the (asynchronous) transformation immediately.
  await applyDistortionCorrection();
}
/// Runs the complete distortion workflow: compute the parameters, render
/// the corrected image file, then enable its display.
///
/// Sets [_state] to loading while working, then to success or error; on
/// failure the message is stored in [_errorMessage]. Listeners are always
/// notified once the workflow settles (see the `finally` block).
Future<void> runFullDistortionWorkflow() async {
  _state = AnalysisState.loading; // The UI shows a spinner in this state.
  notifyListeners();
  try {
    calculateDistortion(); // Compute the correction parameters.
    await applyDistortionCorrection(); // Generate the corrected file.
    _distortionCorrectionEnabled = true; // Switch the display over to it.
    _state = AnalysisState.success;
  } catch (e) {
    _errorMessage = "Erreur de rendu : $e";
    _state = AnalysisState.error;
  } finally {
    notifyListeners();
  }
}
/* Fin de la section deux — à tester */
int _calculateShotScore(double x, double y) {
if (_targetType == TargetType.concentric) {
return _scoreCalculatorService.calculateConcentricScore(

View File

@@ -903,12 +903,16 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
}
void _showAutoDetectDialog(BuildContext context, AnalysisProvider provider) {
// Detection settings
bool clearExisting = true;
double minCircularity = 0.6;
int darkThreshold = 80;
int minImpactSize = 20;
int maxImpactSize = 500;
double minCircularity = 0.6;
double minFillRatio = 0.5;
bool clearExisting = true;
// NOTE: OpenCV désactivé - problèmes de build Windows
// Utilisation de la détection classique uniquement
showDialog(
context: context,
@@ -1012,6 +1016,7 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
setState(() => maxImpactSize = value.round());
},
),
const SizedBox(height: 12),
// Clear existing checkbox
@@ -1053,7 +1058,7 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
),
);
// Run detection
// Run classic detection
final count = await provider.autoDetectImpacts(
darkThreshold: darkThreshold,
minImpactSize: minImpactSize,
@@ -1090,6 +1095,8 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
void _showCalibratedDetectionDialog(BuildContext context, AnalysisProvider provider) {
double tolerance = 2.0;
bool clearExisting = true;
// NOTE: OpenCV désactivé - problèmes de build Windows
// Utilisation de la détection classique uniquement
showDialog(
context: context,
@@ -1177,7 +1184,7 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
onPressed: () async {
Navigator.pop(context);
// Learn from references
// Show loading
ScaffoldMessenger.of(context).showSnackBar(
const SnackBar(
content: Row(
@@ -1195,6 +1202,7 @@ class _AnalysisScreenContentState extends State<_AnalysisScreenContent> {
),
);
// Classic detection: learn then detect
final learned = provider.learnFromReferences();
if (!learned) {

View File

@@ -402,13 +402,64 @@ class DistortionCorrectionService {
return h;
}
/// Solves the DLT linear system to find the 3x3 homography matrix.
///
/// Uses Gaussian elimination with partial pivoting for numerical
/// stability, followed by back substitution. The input [a] is an 8x9
/// system (8 equations, 9 unknowns); h8 is fixed to 1.0, which turns it
/// into a solvable 8x8 system. Returns the 9 homography coefficients in
/// row-major order, or the identity matrix when the system is singular.
List<double> _solveHomography(List<List<double>> a) {
  const int n = 8;
  // Work on a copy so the caller's matrix is not mutated.
  final List<List<double>> matrix =
      List.generate(n, (i) => List<double>.from(a[i]));

  // Right-hand side: in DLT, -h8 * dx (or dy) becomes the constant term.
  final List<double> b = List.generate(n, (i) => -matrix[i][8]);

  // Forward elimination with partial pivoting.
  for (int i = 0; i < n; i++) {
    // Pick the row with the largest absolute value in column i to limit
    // floating-point error.
    int pivot = i;
    for (int j = i + 1; j < n; j++) {
      if (matrix[j][i].abs() > matrix[pivot][i].abs()) {
        pivot = j;
      }
    }

    // Swap rows i and pivot (in both the matrix and b).
    final List<double> tempRow = matrix[i];
    matrix[i] = matrix[pivot];
    matrix[pivot] = tempRow;
    final double tempB = b[i];
    b[i] = b[pivot];
    b[pivot] = tempB;

    // Singular (or near-singular) system: fall back to the identity.
    if (matrix[i][i].abs() < 1e-10) {
      return [1, 0, 0, 0, 1, 0, 0, 0, 1];
    }

    // Eliminate column i from every row below the pivot row.
    for (int j = i + 1; j < n; j++) {
      final double factor = matrix[j][i] / matrix[i][i];
      b[j] -= factor * b[i];
      for (int k = i; k < n; k++) {
        matrix[j][k] -= factor * matrix[i][k];
      }
    }
  }

  // Back substitution.
  final List<double> h = List.filled(9, 0.0);
  for (int i = n - 1; i >= 0; i--) {
    double sum = 0.0;
    for (int j = i + 1; j < n; j++) {
      sum += matrix[i][j] * h[j];
    }
    h[i] = (b[i] - sum) / matrix[i][i];
  }
  h[8] = 1.0; // h8 was fixed to 1 when setting up the system.
  return h;
}
({double x, double y}) _applyPerspectiveTransform(List<double> h, double x, double y) {

View File

@@ -196,10 +196,11 @@ class ImageProcessingService {
/// Analyze reference impacts to learn their characteristics
/// This actually finds the blob at each reference point and extracts its real properties
/// AMÉLIORÉ : Recherche plus large et analyse plus robuste
ImpactCharacteristics? analyzeReferenceImpacts(
String imagePath,
List<ReferenceImpact> references, {
int searchRadius = 30,
int searchRadius = 50, // Augmenté de 30 à 50
}) {
if (references.length < 2) return null;
@@ -209,10 +210,10 @@ class ImageProcessingService {
final originalImage = img.decodeImage(bytes);
if (originalImage == null) return null;
// Resize for faster processing
// Resize for faster processing - taille augmentée
img.Image image;
double scale = 1.0;
final maxDimension = 1000;
final maxDimension = 1200; // Augmenté pour plus de précision
if (originalImage.width > maxDimension || originalImage.height > maxDimension) {
scale = maxDimension / math.max(originalImage.width, originalImage.height);
image = img.copyResize(
@@ -235,45 +236,67 @@ class ImageProcessingService {
final fillRatios = <double>[];
final thresholds = <double>[];
for (final ref in references) {
print('Analyzing ${references.length} reference impacts...');
for (int refIndex = 0; refIndex < references.length; refIndex++) {
final ref = references[refIndex];
final centerX = (ref.x * width).round().clamp(0, width - 1);
final centerY = (ref.y * height).round().clamp(0, height - 1);
// Find the darkest point in the search area (the center of the impact)
print('Reference $refIndex at ($centerX, $centerY)');
// AMÉLIORATION : Recherche du point le plus sombre dans une zone plus large
int darkestX = centerX;
int darkestY = centerY;
double darkestLum = 255;
for (int dy = -searchRadius; dy <= searchRadius; dy++) {
for (int dx = -searchRadius; dx <= searchRadius; dx++) {
final px = centerX + dx;
final py = centerY + dy;
if (px < 0 || px >= width || py < 0 || py >= height) continue;
// Recherche en spirale du point le plus sombre
for (int r = 0; r <= searchRadius; r++) {
for (int dy = -r; dy <= r; dy++) {
for (int dx = -r; dx <= r; dx++) {
// Seulement le périmètre du carré pour éviter les doublons
if (r > 0 && math.max(dx.abs(), dy.abs()) < r) continue;
final pixel = blurred.getPixel(px, py);
final lum = img.getLuminance(pixel).toDouble();
if (lum < darkestLum) {
darkestLum = lum;
darkestX = px;
darkestY = py;
final px = centerX + dx;
final py = centerY + dy;
if (px < 0 || px >= width || py < 0 || py >= height) continue;
final pixel = blurred.getPixel(px, py);
final lum = img.getLuminance(pixel).toDouble();
if (lum < darkestLum) {
darkestLum = lum;
darkestX = px;
darkestY = py;
}
}
}
// Si on a trouvé un point très sombre, on peut s'arrêter
if (darkestLum < 50 && r > 5) break;
}
print(' Darkest point at ($darkestX, $darkestY), lum=$darkestLum');
// Now find the blob at the darkest point using adaptive threshold
// Start from the darkest point and expand until we find the boundary
final blobResult = _findBlobAtPoint(blurred, darkestX, darkestY, width, height);
if (blobResult != null) {
if (blobResult != null && blobResult.size >= 10) { // Au moins 10 pixels
luminances.add(blobResult.avgLuminance);
sizes.add(blobResult.size.toDouble());
circularities.add(blobResult.circularity);
fillRatios.add(blobResult.fillRatio);
thresholds.add(blobResult.threshold);
print(' Found blob: size=${blobResult.size}, circ=${blobResult.circularity.toStringAsFixed(2)}, '
'fill=${blobResult.fillRatio.toStringAsFixed(2)}, threshold=${blobResult.threshold.toStringAsFixed(0)}');
} else {
print(' No valid blob found at this reference');
}
}
if (luminances.isEmpty) return null;
if (luminances.isEmpty) {
print('ERROR: No valid blobs found from any reference!');
return null;
}
// Calculate statistics
final avgLum = luminances.reduce((a, b) => a + b) / luminances.length;
@@ -290,17 +313,25 @@ class ImageProcessingService {
sizeVariance += math.pow(sizes[i] - avgSize, 2);
}
final lumStdDev = math.sqrt(lumVariance / luminances.length);
final sizeStdDev = math.sqrt(sizeVariance / sizes.length);
// AMÉLIORATION : Écart-type minimum pour éviter des plages trop étroites
final sizeStdDev = math.max(
math.sqrt(sizeVariance / sizes.length),
avgSize * 0.3, // Au moins 30% de variance
);
return ImpactCharacteristics(
final result = ImpactCharacteristics(
avgLuminance: avgLum,
luminanceStdDev: lumStdDev,
luminanceStdDev: math.max(lumStdDev, 10), // Minimum 10 de variance
avgSize: avgSize,
sizeStdDev: sizeStdDev,
avgCircularity: avgCirc,
avgFillRatio: avgFill,
avgDarkThreshold: avgThreshold,
);
print('Learned characteristics: $result');
return result;
} catch (e) {
print('Error analyzing reference impacts: $e');
return null;
@@ -308,25 +339,30 @@ class ImageProcessingService {
}
/// Find a blob at a specific point and extract its characteristics
/// AMÉLIORÉ : Utilise plusieurs méthodes de détection et retourne le meilleur résultat
_BlobAnalysis? _findBlobAtPoint(img.Image image, int startX, int startY, int width, int height) {
// Get the luminance at the center point
final centerPixel = image.getPixel(startX, startY);
final centerLum = img.getLuminance(centerPixel).toDouble();
// Find the threshold by looking at the luminance gradient around the point
// Sample in expanding circles to find where the blob ends
// MÉTHODE 1 : Expansion radiale pour trouver le bord
double sumLum = centerLum;
int pixelCount = 1;
double maxRadius = 0;
// Sample at different radii to find the edge
for (int r = 1; r <= 50; r++) {
// Collecter les luminances à différents rayons pour une analyse plus robuste
final radialLuminances = <double>[];
// Sample at different radii to find the edge - LIMITE RAISONNABLE pour impacts de balle
final maxSearchRadius = 60; // Un impact de balle ne fait pas plus de 60 pixels de rayon
for (int r = 1; r <= maxSearchRadius; r++) {
double ringSum = 0;
int ringCount = 0;
// Sample points on a ring
for (int i = 0; i < 16; i++) {
final angle = (i / 16) * 2 * math.pi;
final numSamples = math.max(12, r ~/ 2);
for (int i = 0; i < numSamples; i++) {
final angle = (i / numSamples) * 2 * math.pi;
final px = startX + (r * math.cos(angle)).round();
final py = startY + (r * math.sin(angle)).round();
if (px < 0 || px >= width || py < 0 || py >= height) continue;
@@ -339,20 +375,47 @@ class ImageProcessingService {
if (ringCount > 0) {
final avgRingLum = ringSum / ringCount;
// If the ring is significantly brighter than the center, we've found the edge
if (avgRingLum > centerLum + 40) {
radialLuminances.add(avgRingLum);
// Détection du bord : gradient de luminosité significatif
// Seuil adaptatif basé sur la différence avec le centre
final luminanceDiff = avgRingLum - centerLum;
// Le bord est trouvé quand on a une augmentation significative de luminosité
if (luminanceDiff > 30 && maxRadius == 0) {
maxRadius = r.toDouble();
break;
break; // Arrêter dès qu'on trouve le bord
}
if (maxRadius == 0) {
sumLum += ringSum;
pixelCount += ringCount;
}
sumLum += ringSum;
pixelCount += ringCount;
}
}
if (maxRadius < 3) return null; // Too small to be a valid blob
// Si aucun bord trouvé, chercher le gradient maximum
if (maxRadius < 2 && radialLuminances.length > 3) {
double maxGradient = 0;
int maxGradientIndex = 0;
for (int i = 1; i < radialLuminances.length; i++) {
final gradient = radialLuminances[i] - radialLuminances[i - 1];
if (gradient > maxGradient) {
maxGradient = gradient;
maxGradientIndex = i;
}
}
if (maxGradient > 10) {
maxRadius = (maxGradientIndex + 1).toDouble();
}
}
// Calculate threshold as the midpoint between center and edge luminance
final edgeRadius = (maxRadius * 1.2).round();
// Rayon minimum de 3 pixels, maximum de 50 pour un impact de balle
if (maxRadius < 3) maxRadius = 3;
if (maxRadius > 50) maxRadius = 50;
// Calculate threshold as weighted average between center and edge luminance
final edgeRadius = math.min((maxRadius * 1.2).round(), maxSearchRadius - 1);
double edgeLum = 0;
int edgeCount = 0;
for (int i = 0; i < 16; i++) {
@@ -366,62 +429,94 @@ class ImageProcessingService {
}
if (edgeCount > 0) {
edgeLum /= edgeCount;
} else {
edgeLum = centerLum + 50;
}
final threshold = ((centerLum + edgeLum) / 2).round();
// Calculer le seuil optimal
final threshold = ((centerLum + edgeLum) / 2).round().clamp(20, 200);
// Now do a flood fill with this threshold to get the actual blob
final mask = List.generate(height, (_) => List.filled(width, false));
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
final pixel = image.getPixel(x, y);
// Utiliser une zone de recherche locale limitée autour du point
final analysis = _tryFindBlobWithThresholdLocal(
image, startX, startY, width, height, threshold, sumLum / pixelCount,
maxRadius.round() + 10, // Zone de recherche légèrement plus grande que le rayon détecté
);
return analysis;
}
/// Trouve un blob avec un seuil dans une zone locale limitée
_BlobAnalysis? _tryFindBlobWithThresholdLocal(
img.Image image,
int startX,
int startY,
int width,
int height,
int threshold,
double avgLuminance,
int maxSearchRadius,
) {
// Limiter la zone de recherche
final minX = math.max(0, startX - maxSearchRadius);
final maxX = math.min(width - 1, startX + maxSearchRadius);
final minY = math.max(0, startY - maxSearchRadius);
final maxY = math.min(height - 1, startY + maxSearchRadius);
final localWidth = maxX - minX + 1;
final localHeight = maxY - minY + 1;
// Create binary mask ONLY for the local region
final mask = List.generate(localHeight, (_) => List.filled(localWidth, false));
for (int y = 0; y < localHeight; y++) {
for (int x = 0; x < localWidth; x++) {
final globalX = minX + x;
final globalY = minY + y;
final pixel = image.getPixel(globalX, globalY);
final lum = img.getLuminance(pixel);
mask[y][x] = lum < threshold;
}
}
final visited = List.generate(height, (_) => List.filled(width, false));
final visited = List.generate(localHeight, (_) => List.filled(localWidth, false));
// Find the blob containing the start point
if (!mask[startY][startX]) {
// Find the blob containing the start point (in local coordinates)
final localStartX = startX - minX;
final localStartY = startY - minY;
int searchX = localStartX;
int searchY = localStartY;
if (!mask[localStartY][localStartX]) {
// Start point might not be in mask, find nearest point that is
for (int r = 1; r <= 10; r++) {
bool found = false;
bool found = false;
for (int r = 1; r <= 15 && !found; r++) {
for (int dy = -r; dy <= r && !found; dy++) {
for (int dx = -r; dx <= r && !found; dx++) {
final px = startX + dx;
final py = startY + dy;
if (px >= 0 && px < width && py >= 0 && py < height && mask[py][px]) {
final blob = _floodFill(mask, visited, px, py, width, height);
// Calculate fill ratio: actual pixels / bounding circle area
final boundingRadius = math.max(blob.radius, 1);
final boundingCircleArea = math.pi * boundingRadius * boundingRadius;
final fillRatio = (blob.size / boundingCircleArea).clamp(0.0, 1.0);
return _BlobAnalysis(
avgLuminance: sumLum / pixelCount,
size: blob.size,
circularity: blob.circularity,
fillRatio: fillRatio,
threshold: threshold.toDouble(),
);
final px = localStartX + dx;
final py = localStartY + dy;
if (px >= 0 && px < localWidth && py >= 0 && py < localHeight && mask[py][px]) {
searchX = px;
searchY = py;
found = true;
}
}
}
}
return null;
if (!found) return null;
}
final blob = _floodFill(mask, visited, startX, startY, width, height);
final blob = _floodFillLocal(mask, visited, searchX, searchY, localWidth, localHeight);
// Calculate fill ratio
// Vérifier que le blob est valide - taille raisonnable pour un impact
if (blob.size < 10 || blob.size > 5000) return null; // Entre 10 et 5000 pixels
// Calculate fill ratio: actual pixels / bounding circle area
final boundingRadius = math.max(blob.radius, 1);
final boundingCircleArea = math.pi * boundingRadius * boundingRadius;
final fillRatio = (blob.size / boundingCircleArea).clamp(0.0, 1.0);
return _BlobAnalysis(
avgLuminance: sumLum / pixelCount,
avgLuminance: avgLuminance,
size: blob.size,
circularity: blob.circularity,
fillRatio: fillRatio,
@@ -429,12 +524,110 @@ class ImageProcessingService {
);
}
/// Flood fill over a local (cropped) mask region.
///
/// Starting from ([startX], [startY]), grows a 4-connected region of
/// `true` cells in [mask], marking them in [visited], and returns a
/// [_Blob] describing the region: centroid, approximate radius, pixel
/// count, circularity, aspect ratio and fill ratio. Coordinates are local
/// to the mask ([width] x [height]), not global image coordinates.
_Blob _floodFillLocal(
  List<List<bool>> mask,
  List<List<bool>> visited,
  int startX,
  int startY,
  int width,
  int height,
) {
  // Iterative DFS via an explicit stack: avoids recursion-depth problems
  // on large blobs.
  final stack = <_Point>[_Point(startX, startY)];
  final points = <_Point>[];
  int minX = startX, maxX = startX;
  int minY = startY, maxY = startY;
  int perimeterCount = 0;
  while (stack.isNotEmpty) {
    final point = stack.removeLast();
    final x = point.x;
    final y = point.y;
    if (x < 0 || x >= width || y < 0 || y >= height) continue;
    if (visited[y][x] || !mask[y][x]) continue;
    visited[y][x] = true;
    points.add(point);
    minX = math.min(minX, x);
    maxX = math.max(maxX, x);
    minY = math.min(minY, y);
    maxY = math.max(maxY, y);
    // A pixel is on the perimeter when any 4-neighbour falls outside the
    // region bounds or outside the mask.
    bool isPerimeter = false;
    for (final delta in [[-1, 0], [1, 0], [0, -1], [0, 1]]) {
      final nx = x + delta[0];
      final ny = y + delta[1];
      if (nx < 0 || nx >= width || ny < 0 || ny >= height || !mask[ny][nx]) {
        isPerimeter = true;
        break;
      }
    }
    if (isPerimeter) perimeterCount++;
    // Push the 4-connected neighbours; bounds are re-checked on pop.
    stack.add(_Point(x + 1, y));
    stack.add(_Point(x - 1, y));
    stack.add(_Point(x, y + 1));
    stack.add(_Point(x, y - 1));
  }
  // Centroid of the filled pixels; falls back to the start point when the
  // fill found nothing (start cell was not set in the mask).
  double sumX = 0, sumY = 0;
  for (final p in points) {
    sumX += p.x;
    sumY += p.y;
  }
  final centerX = points.isNotEmpty ? sumX / points.length : startX.toDouble();
  final centerY = points.isNotEmpty ? sumY / points.length : startY.toDouble();
  // Bounding-box dimensions of the region.
  final blobWidth = (maxX - minX + 1).toDouble();
  final blobHeight = (maxY - minY + 1).toDouble();
  // Approximate radius: half of the larger bounding-box side.
  final radius = math.max(blobWidth, blobHeight) / 2.0;
  // Circularity: 4*pi*area / perimeter^2 — 1.0 for a perfect disc.
  final area = points.length.toDouble();
  final perimeter = perimeterCount.toDouble();
  final circularity = perimeter > 0
      ? (4 * math.pi * area) / (perimeter * perimeter)
      : 0.0;
  // Aspect ratio normalised to be >= 1.0 (long side over short side).
  final aspectRatio = blobWidth > blobHeight
      ? blobWidth / blobHeight
      : blobHeight / blobWidth;
  // Fill ratio: filled pixels over the bounding circle's area.
  final boundingCircleArea = math.pi * radius * radius;
  final fillRatio = boundingCircleArea > 0 ? (area / boundingCircleArea).clamp(0.0, 1.0) : 0.0;
  return _Blob(
    x: centerX,
    y: centerY,
    radius: radius,
    size: points.length,
    circularity: circularity.clamp(0.0, 1.0),
    aspectRatio: aspectRatio,
    fillRatio: fillRatio,
  );
}
/// Detect impacts based on reference characteristics with tolerance
///
/// Utilise une approche multi-seuils adaptative pour une meilleure détection
List<DetectedImpact> detectImpactsFromReferences(
String imagePath,
ImpactCharacteristics characteristics, {
double tolerance = 2.0, // Number of standard deviations
double minCircularity = 0.4,
double minCircularity = 0.3,
}) {
try {
final file = File(imagePath);
@@ -445,7 +638,7 @@ class ImageProcessingService {
// Resize for faster processing
img.Image image;
double scale = 1.0;
final maxDimension = 1000;
final maxDimension = 1200; // Augmenté pour plus de précision
if (originalImage.width > maxDimension || originalImage.height > maxDimension) {
scale = maxDimension / math.max(originalImage.width, originalImage.height);
image = img.copyResize(
@@ -460,36 +653,83 @@ class ImageProcessingService {
final grayscale = img.grayscale(image);
final blurred = img.gaussianBlur(grayscale, radius: 2);
// Use the threshold learned from references
final threshold = characteristics.avgDarkThreshold.round();
// AMÉLIORATION : Utiliser plusieurs seuils autour du seuil appris
final baseThreshold = characteristics.avgDarkThreshold.round();
// Générer une plage de seuils plus ciblée
final thresholds = <int>[];
final thresholdRange = (15 * tolerance).round(); // Plage modérée
for (int offset = -thresholdRange; offset <= thresholdRange; offset += 8) {
final t = (baseThreshold + offset).clamp(30, 150);
if (!thresholds.contains(t)) thresholds.add(t);
}
// Calculate size range based on learned characteristics
final minSize = (characteristics.avgSize / (tolerance * 2)).clamp(5, 10000).round();
final maxSize = (characteristics.avgSize * tolerance * 2).clamp(10, 10000).round();
// Utiliser la variance mais avec des limites raisonnables
final sizeVariance = math.max(characteristics.sizeStdDev * tolerance, characteristics.avgSize * 0.4);
final minSize = math.max(20, (characteristics.avgSize - sizeVariance).round()); // Minimum 20 pixels
final maxSize = math.min(3000, (characteristics.avgSize + sizeVariance * 2).round()); // Maximum 3000 pixels
// Calculate minimum fill ratio based on learned characteristics
// Allow some variance but still filter out hollow shapes
final minFillRatio = (characteristics.avgFillRatio - 0.2).clamp(0.3, 0.9);
// Calculate minimum circularity - équilibré
final circularityTolerance = 0.2 * tolerance;
final effectiveMinCircularity = math.max(
characteristics.avgCircularity - circularityTolerance,
minCircularity,
).clamp(0.35, 0.85);
// Detect blobs using the learned threshold
final impacts = _detectDarkSpots(
blurred,
threshold,
minSize,
maxSize,
minCircularity: math.max(characteristics.avgCircularity - 0.2, minCircularity),
minFillRatio: minFillRatio,
);
// Calculate minimum fill ratio - impacts pleins
final minFillRatio = (characteristics.avgFillRatio - 0.2).clamp(0.35, 0.85);
print('Detection params: thresholds=$thresholds, size=$minSize-$maxSize, '
'circ>=$effectiveMinCircularity, fill>=$minFillRatio');
// Détecter avec plusieurs seuils et combiner les résultats
final allBlobs = <_Blob>[];
for (final threshold in thresholds) {
final blobs = _detectDarkSpots(
blurred,
threshold,
minSize,
maxSize,
minCircularity: effectiveMinCircularity,
maxAspectRatio: 2.5, // Plus permissif
minFillRatio: minFillRatio,
);
allBlobs.addAll(blobs);
}
// Fusionner les blobs qui se chevauchent (même impact détecté à différents seuils)
final mergedBlobs = _mergeOverlappingBlobs(allBlobs);
// FILTRE POST-DÉTECTION : Garder seulement les blobs similaires aux références
// Le filtre est plus ou moins strict selon la tolérance
final sizeToleranceFactor = 0.3 + (tolerance - 1) * 0.3; // 0.3 à 1.5 selon tolérance
final minSizeRatio = math.max(0.15, 1 / (1 + sizeToleranceFactor * 3));
final maxSizeRatio = 1 + sizeToleranceFactor * 4;
final filteredBlobs = mergedBlobs.where((blob) {
// Vérifier la taille par rapport aux caractéristiques apprises
final sizeRatio = blob.size / characteristics.avgSize;
if (sizeRatio < minSizeRatio || sizeRatio > maxSizeRatio) return false;
// Vérifier la circularité (légèrement relaxée)
if (blob.circularity < effectiveMinCircularity * 0.85) return false;
// Vérifier le fill ratio
if (blob.fillRatio < minFillRatio * 0.9) return false;
return true;
}).toList();
print('Found ${filteredBlobs.length} impacts after filtering (from ${mergedBlobs.length} merged)');
// Convert to relative coordinates
final width = originalImage.width.toDouble();
final height = originalImage.height.toDouble();
return impacts.map((impact) {
return filteredBlobs.map((blob) {
return DetectedImpact(
x: impact.x / image.width,
y: impact.y / image.height,
radius: impact.radius / scale,
x: blob.x / image.width,
y: blob.y / image.height,
radius: blob.radius / scale,
);
}).toList();
} catch (e) {
@@ -498,6 +738,44 @@ class ImageProcessingService {
}
}
/// Merges overlapping blobs, keeping the best representative of each group.
///
/// Blobs detected at different thresholds can describe the same physical
/// impact; only one blob per overlapping cluster is kept.
List<_Blob> _mergeOverlappingBlobs(List<_Blob> blobs) {
  if (blobs.isEmpty) return [];

  // Rank candidates by a quality score (circularity * fill ratio * size),
  // best first, so the greedy pass below keeps the strongest blob of each
  // overlapping group.
  final candidates = List<_Blob>.from(blobs)
    ..sort((a, b) {
      final scoreA = a.circularity * a.fillRatio * a.size;
      final scoreB = b.circularity * b.fillRatio * b.size;
      return scoreB.compareTo(scoreA);
    });

  final kept = <_Blob>[];
  for (final candidate in candidates) {
    final overlapsExisting = kept.any((existing) {
      final dx = candidate.x - existing.x;
      final dy = candidate.y - existing.y;
      final distance = math.sqrt(dx * dx + dy * dy);
      // Centres closer than 1.5x the smaller radius => same impact.
      return distance < math.min(candidate.radius, existing.radius) * 1.5;
    });
    if (!overlapsExisting) {
      kept.add(candidate);
    }
  }
  return kept;
}
/// Detect dark spots with adaptive luminance range
List<_Blob> _detectDarkSpotsAdaptive(
img.Image image,

View File

@@ -0,0 +1,119 @@
/// Service de détection d'impacts utilisant OpenCV.
///
/// NOTE: OpenCV est actuellement désactivé sur Windows en raison de problèmes
/// de compilation. Ce fichier contient des stubs qui permettent au code de
/// compiler sans OpenCV. Réactiver opencv_dart dans pubspec.yaml et
/// décommenter le code ci-dessous quand le support sera corrigé.
library;
// import 'dart:math' as math;
// import 'package:opencv_dart/opencv_dart.dart' as cv;
/// Settings for OpenCV-based impact detection.
class OpenCVDetectionSettings {
  /// Lower Canny threshold for edge detection.
  final double cannyThreshold1;
  /// Upper Canny threshold for edge detection.
  final double cannyThreshold2;
  /// Minimum distance between the centres of detected circles.
  final double minDist;
  /// HoughCircles param1 (internal Canny threshold).
  final double param1;
  /// HoughCircles param2 (accumulator threshold).
  final double param2;
  /// Minimum circle radius, in pixels.
  final int minRadius;
  /// Maximum circle radius, in pixels.
  final int maxRadius;
  /// Gaussian blur kernel size (must be odd).
  final int blurSize;
  /// Whether to run contour detection in addition to Hough circles.
  final bool useContourDetection;
  /// Minimum circularity for contour-based detection (0-1).
  final double minCircularity;
  /// Minimum contour area.
  final double minContourArea;
  /// Maximum contour area.
  final double maxContourArea;
  const OpenCVDetectionSettings({
    this.cannyThreshold1 = 50,
    this.cannyThreshold2 = 150,
    this.minDist = 20,
    this.param1 = 100,
    this.param2 = 30,
    this.minRadius = 5,
    this.maxRadius = 50,
    this.blurSize = 5,
    this.useContourDetection = true,
    this.minCircularity = 0.6,
    this.minContourArea = 50,
    this.maxContourArea = 5000,
  });
}
/// A single impact detection result.
class OpenCVDetectedImpact {
  /// Normalised X position (0-1).
  final double x;
  /// Normalised Y position (0-1).
  final double y;
  /// Radius, in pixels.
  final double radius;
  /// Confidence score (0-1).
  final double confidence;
  /// Name of the detection method that produced this result.
  final String method;
  const OpenCVDetectedImpact({
    required this.x,
    required this.y,
    required this.radius,
    this.confidence = 1.0,
    this.method = 'unknown',
  });
}
/// Impact detection service backed by OpenCV.
///
/// NOTE: currently disabled — OpenCV is not available on Windows for now,
/// so both methods are stubs that log a message and return empty lists.
class OpenCVImpactDetectionService {
  /// Detects impacts in an image using OpenCV.
  ///
  /// STUB: returns an empty list because OpenCV is disabled.
  List<OpenCVDetectedImpact> detectImpacts(
    String imagePath, {
    OpenCVDetectionSettings settings = const OpenCVDetectionSettings(),
  }) {
    print('OpenCV est désactivé - utilisation de la détection classique recommandée');
    return [];
  }
  /// Detects impacts using user-provided reference points.
  ///
  /// STUB: returns an empty list because OpenCV is disabled.
  List<OpenCVDetectedImpact> detectFromReferences(
    String imagePath,
    List<({double x, double y})> referencePoints, {
    double tolerance = 2.0,
  }) {
    print('OpenCV est désactivé - utilisation de la détection par références classique recommandée');
    return [];
  }
}

View File

@@ -1,8 +1,10 @@
import 'dart:math' as math;
import '../data/models/target_type.dart';
import 'image_processing_service.dart';
import 'opencv_impact_detection_service.dart';
export 'image_processing_service.dart' show ImpactDetectionSettings, ReferenceImpact, ImpactCharacteristics;
export 'opencv_impact_detection_service.dart' show OpenCVDetectionSettings, OpenCVDetectedImpact;
class TargetDetectionResult {
final double centerX; // Relative (0-1)
@@ -49,10 +51,13 @@ class DetectedImpactResult {
class TargetDetectionService {
final ImageProcessingService _imageProcessingService;
final OpenCVImpactDetectionService _opencvService;
TargetDetectionService({
ImageProcessingService? imageProcessingService,
}) : _imageProcessingService = imageProcessingService ?? ImageProcessingService();
OpenCVImpactDetectionService? opencvService,
}) : _imageProcessingService = imageProcessingService ?? ImageProcessingService(),
_opencvService = opencvService ?? OpenCVImpactDetectionService();
/// Detect target and impacts from an image file
TargetDetectionResult detectTarget(
@@ -254,4 +259,88 @@ class TargetDetectionService {
return [];
}
}
/// Detects impacts using OpenCV (Hough circles + contours).
///
/// Runs the OpenCV pipeline on [imagePath], then converts each detection
/// into a [DetectedImpactResult] whose suggested score is derived from the
/// detection's position relative to the target center. Returns an empty
/// list if the OpenCV service throws.
List<DetectedImpactResult> detectImpactsWithOpenCV(
  String imagePath,
  TargetType targetType,
  double centerX,
  double centerY,
  double radius,
  int ringCount, {
  OpenCVDetectionSettings? settings,
}) {
  try {
    final detections = _opencvService.detectImpacts(
      imagePath,
      settings: settings ?? const OpenCVDetectionSettings(),
    );
    final results = <DetectedImpactResult>[];
    for (final hit in detections) {
      // Scoring depends on the target family: concentric rings vs silhouette.
      final suggested = targetType == TargetType.concentric
          ? _calculateConcentricScoreWithRings(
              hit.x, hit.y, centerX, centerY, radius, ringCount)
          : _calculateSilhouetteScore(hit.x, hit.y, centerX, centerY);
      results.add(DetectedImpactResult(
        x: hit.x,
        y: hit.y,
        radius: hit.radius,
        suggestedScore: suggested,
      ));
    }
    return results;
  } catch (e) {
    print('Erreur détection OpenCV: $e');
    return [];
  }
}
/// Detects impacts with OpenCV using known reference impacts.
///
/// Forwards the reference impacts to the OpenCV service, which searches for
/// visually similar marks; each match is then scored from its position on
/// the target. Returns an empty list if the OpenCV service throws.
List<DetectedImpactResult> detectImpactsWithOpenCVFromReferences(
  String imagePath,
  TargetType targetType,
  double centerX,
  double centerY,
  double radius,
  int ringCount,
  List<ReferenceImpact> references, {
  double tolerance = 2.0,
}) {
  try {
    // The OpenCV service expects plain (x, y) records, not ReferenceImpact.
    final anchorPoints = [for (final r in references) (x: r.x, y: r.y)];
    final detections = _opencvService.detectFromReferences(
      imagePath,
      anchorPoints,
      tolerance: tolerance,
    );
    return [
      for (final hit in detections)
        DetectedImpactResult(
          x: hit.x,
          y: hit.y,
          radius: hit.radius,
          suggestedScore: targetType == TargetType.concentric
              ? _calculateConcentricScoreWithRings(
                  hit.x, hit.y, centerX, centerY, radius, ringCount)
              : _calculateSilhouetteScore(hit.x, hit.y, centerX, centerY),
        ),
    ];
  } catch (e) {
    print('Erreur détection OpenCV depuis références: $e');
    return [];
  }
}
}

View File

@@ -35,7 +35,7 @@ dependencies:
# Use with the CupertinoIcons class for iOS style icons.
cupertino_icons: ^1.0.8
# Image processing with OpenCV (temporarily disabled - Windows build issues)
# opencv_dart: ^2.1.0
# Image capture from camera/gallery