Préparation du modèle YOLO

This commit is contained in:
2026-03-12 22:03:40 +01:00
parent d4cb179fde
commit e32833e366
13 changed files with 727 additions and 24 deletions

View File

@@ -523,6 +523,20 @@ class AnalysisProvider extends ChangeNotifier {
if (_imagePath == null) return false;
try {
// 1. Attempt to correct perspective/distortion first
final correctedPath = await _distortionService
.correctPerspectiveWithConcentricMesh(_imagePath!);
if (correctedPath != _imagePath) {
_imagePath = correctedPath;
_correctedImagePath = correctedPath;
_distortionCorrectionEnabled = true;
_imageAspectRatio =
1.0; // The corrected image is always square (side x side)
notifyListeners();
}
// 2. Detect the target on the straight/corrected image
final result = await _opencvTargetService.detectTarget(_imagePath!);
if (result.success) {

View File

@@ -10,6 +10,7 @@ import 'services/target_detection_service.dart';
import 'services/score_calculator_service.dart';
import 'services/grouping_analyzer_service.dart';
import 'services/image_processing_service.dart';
import 'services/yolo_impact_detection_service.dart';
void main() async {
WidgetsFlutterBinding.ensureInitialized();
@@ -33,9 +34,13 @@ void main() async {
Provider<ImageProcessingService>(
create: (_) => ImageProcessingService(),
),
Provider<YOLOImpactDetectionService>(
create: (_) => YOLOImpactDetectionService(),
),
Provider<TargetDetectionService>(
create: (context) => TargetDetectionService(
imageProcessingService: context.read<ImageProcessingService>(),
yoloService: context.read<YOLOImpactDetectionService>(),
),
),
Provider<ScoreCalculatorService>(
@@ -44,9 +49,7 @@ void main() async {
Provider<GroupingAnalyzerService>(
create: (_) => GroupingAnalyzerService(),
),
Provider<SessionRepository>(
create: (_) => SessionRepository(),
),
Provider<SessionRepository>(create: (_) => SessionRepository()),
],
child: const BullyApp(),
),

View File

@@ -676,4 +676,399 @@ class DistortionCorrectionService {
points[2] = br;
points[3] = bl;
}
/// Corrects perspective by reshaping the largest oval (ellipse) into a
/// perfect circle, without aggressively cropping the whole image.
///
/// Returns the path of the corrected (square) image, or [imagePath]
/// unchanged when no usable ellipse is found or on any error.
Future<String> correctPerspectiveUsingOvals(String imagePath) async {
  try {
    final src = cv.imread(imagePath, flags: cv.IMREAD_COLOR);
    if (src.isEmpty) throw Exception("Impossible de charger l'image");
    // Edge pipeline: grayscale -> blur -> Otsu threshold (used only to
    // derive the Canny hysteresis bounds) -> Canny.
    final gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY);
    final blurred = cv.gaussianBlur(gray, (5, 5), 0);
    final thresh = cv.threshold(
      blurred,
      0,
      255,
      cv.THRESH_BINARY | cv.THRESH_OTSU,
    );
    final edges = cv.canny(blurred, thresh.$1 * 0.5, thresh.$1);
    final contoursResult = cv.findContours(
      edges,
      cv.RETR_EXTERNAL,
      cv.CHAIN_APPROX_SIMPLE,
    );
    final contours = contoursResult.$1;
    if (contours.isEmpty) return imagePath;
    // Keep the ellipse fitted to the largest plausible contour.
    cv.RotatedRect? bestEllipse;
    double maxArea = 0;
    for (final contour in contours) {
      if (contour.length < 5) continue; // fitEllipse needs >= 5 points
      final area = cv.contourArea(contour);
      if (area < 1000) continue; // ignore small noise contours
      if (area > maxArea) {
        // Only fit an ellipse for a new best candidate; fitting every
        // qualifying contour up-front was wasted work.
        maxArea = area;
        bestEllipse = cv.fitEllipse(contour);
      }
    }
    if (bestEllipse == null) return imagePath;
    // The goal here is to morph the bestEllipse into a perfect circle, while
    // keeping the center of the ellipse in the same place.
    // We use the max of the width and height to define the target circle.
    final targetRadius =
        math.max(bestEllipse.size.width, bestEllipse.size.height) / 2.0;
    // Extract the 4 bounding box points of the ellipse
    final boxPoints = cv.boxPoints(bestEllipse);
    final List<cv.Point> srcPoints = [];
    for (int i = 0; i < boxPoints.length; i++) {
      srcPoints.add(cv.Point(boxPoints[i].x.toInt(), boxPoints[i].y.toInt()));
    }
    _sortPoints(srcPoints);
    // Calculate the size of the perfectly squared output image
    final int side = (targetRadius * 2).toInt();
    final List<cv.Point> dstPoints = [
      cv.Point(0, 0), // Top-Left
      cv.Point(side, 0), // Top-Right
      cv.Point(side, side), // Bottom-Right
      cv.Point(0, side), // Bottom-Left
    ];
    // Morph the target region into a perfect square, cropping the rest of the image
    final M = cv.getPerspectiveTransform(
      cv.VecPoint.fromList(srcPoints),
      cv.VecPoint.fromList(dstPoints),
    );
    final corrected = cv.warpPerspective(src, M, (side, side));
    final tempDir = await getTemporaryDirectory();
    final timestamp = DateTime.now().millisecondsSinceEpoch;
    final outputPath = '${tempDir.path}/corrected_oval_$timestamp.jpg';
    cv.imwrite(outputPath, corrected);
    return outputPath;
  } catch (e) {
    print('Erreur correction perspective ovales: $e');
    return imagePath;
  }
}
/// Corrects distortion and depth (perspective) by building a mesh based on
/// the concentricity of the target's rings to find the best plane.
///
/// Falls back to [correctPerspectiveUsingOvals] when fewer than two
/// concentric ellipses are found or when the homography is degenerate.
/// Returns the corrected image path, or [imagePath] unchanged on error.
Future<String> correctPerspectiveWithConcentricMesh(String imagePath) async {
  try {
    final src = cv.imread(imagePath, flags: cv.IMREAD_COLOR);
    if (src.isEmpty) throw Exception("Impossible de charger l'image");
    // Edge pipeline: grayscale -> blur -> Otsu threshold (used only to
    // derive the Canny hysteresis bounds) -> Canny.
    final gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY);
    final blurred = cv.gaussianBlur(gray, (5, 5), 0);
    final thresh = cv.threshold(
      blurred,
      0,
      255,
      cv.THRESH_BINARY | cv.THRESH_OTSU,
    );
    final edges = cv.canny(blurred, thresh.$1 * 0.5, thresh.$1);
    // RETR_LIST: keep every ring contour, nested ones included.
    final contoursResult = cv.findContours(
      edges,
      cv.RETR_LIST,
      cv.CHAIN_APPROX_SIMPLE,
    );
    final contours = contoursResult.$1;
    if (contours.isEmpty) return imagePath;
    // Fit an ellipse to every plausible contour (>= 5 points, minimal area).
    List<cv.RotatedRect> ellipses = [];
    for (final contour in contours) {
      if (contour.length < 5) continue;
      if (cv.contourArea(contour) < 500) continue;
      ellipses.add(cv.fitEllipse(contour));
    }
    if (ellipses.isEmpty) return imagePath;
    // Find the largest ellipse to serve as our central reference
    ellipses.sort(
      (a, b) => (b.size.width * b.size.height).compareTo(
        a.size.width * a.size.height,
      ),
    );
    final largestEllipse = ellipses.first;
    // Centers within 15% of the largest ellipse's major dimension count
    // as concentric with it.
    final maxDist =
        math.max(largestEllipse.size.width, largestEllipse.size.height) *
        0.15;
    // Group all ellipses that are roughly concentric with the largest one
    List<cv.RotatedRect> concentricGroup = [];
    for (final e in ellipses) {
      final dx = e.center.x - largestEllipse.center.x;
      final dy = e.center.y - largestEllipse.center.y;
      if (math.sqrt(dx * dx + dy * dy) < maxDist) {
        concentricGroup.add(e);
      }
    }
    if (concentricGroup.length < 2) {
      print(
        "Pas assez de cercles concentriques pour le maillage, utilisation de la méthode simple.",
      );
      return await correctPerspectiveUsingOvals(imagePath);
    }
    // Output canvas: square, sized from the largest ring plus 20% padding,
    // with the target's center mapped to the canvas center (cx, cy).
    final targetRadius =
        math.max(largestEllipse.size.width, largestEllipse.size.height) / 2.0;
    final int side = (targetRadius * 2.4).toInt(); // Add padding
    final double cx = side / 2.0;
    final double cy = side / 2.0;
    // For every concentric ellipse, collect correspondences mapping its
    // axis endpoints onto a perfect circle of the same radius.
    List<cv.Point2f> srcPointsList = [];
    List<cv.Point2f> dstPointsList = [];
    for (final ellipse in concentricGroup) {
      // Midpoints of the rotated bounding-box edges are the four points
      // where the ellipse touches its box, i.e. the endpoints of its axes.
      final box = cv.boxPoints(ellipse);
      final m0 = cv.Point2f(
        (box[0].x + box[1].x) / 2,
        (box[0].y + box[1].y) / 2,
      );
      final m1 = cv.Point2f(
        (box[1].x + box[2].x) / 2,
        (box[1].y + box[2].y) / 2,
      );
      final m2 = cv.Point2f(
        (box[2].x + box[3].x) / 2,
        (box[2].y + box[3].y) / 2,
      );
      final m3 = cv.Point2f(
        (box[3].x + box[0].x) / 2,
        (box[3].y + box[0].y) / 2,
      );
      // Lengths of the two axes: m0-m2 and m1-m3 are opposite midpoints.
      final d02 = math.sqrt(
        math.pow(m0.x - m2.x, 2) + math.pow(m0.y - m2.y, 2),
      );
      final d13 = math.sqrt(
        math.pow(m1.x - m3.x, 2) + math.pow(m1.y - m3.y, 2),
      );
      // Classify the longer pair as the major axis; r is its half-length
      // and becomes the radius of the destination circle for this ring.
      cv.Point2f maj1, maj2, min1, min2;
      double r;
      if (d02 > d13) {
        maj1 = m0;
        maj2 = m2;
        min1 = m1;
        min2 = m3;
        r = d02 / 2.0;
      } else {
        maj1 = m1;
        maj2 = m3;
        min1 = m0;
        min2 = m2;
        r = d13 / 2.0;
      }
      // Sort maj1 and maj2 so maj1 is left/top (dominant direction first)
      if ((maj1.x - maj2.x).abs() > (maj1.y - maj2.y).abs()) {
        if (maj1.x > maj2.x) {
          final t = maj1;
          maj1 = maj2;
          maj2 = t;
        }
      } else {
        if (maj1.y > maj2.y) {
          final t = maj1;
          maj1 = maj2;
          maj2 = t;
        }
      }
      // Sort min1 and min2 so min1 is top/left
      if ((min1.y - min2.y).abs() > (min1.x - min2.x).abs()) {
        if (min1.y > min2.y) {
          final t = min1;
          min1 = min2;
          min2 = t;
        }
      } else {
        if (min1.x > min2.x) {
          final t = min1;
          min1 = min2;
          min2 = t;
        }
      }
      // Major axis maps to the horizontal diameter, minor to the vertical.
      srcPointsList.addAll([maj1, maj2, min1, min2]);
      dstPointsList.addAll([
        cv.Point2f(cx - r, cy),
        cv.Point2f(cx + r, cy),
        cv.Point2f(cx, cy - r),
        cv.Point2f(cx, cy + r),
      ]);
      // Add ellipse centers mapping perfectly to the origin to force concentric depth alignment
      srcPointsList.add(cv.Point2f(ellipse.center.x, ellipse.center.y));
      dstPointsList.add(cv.Point2f(cx, cy));
    }
    // We explicitly convert points to VecPoint to use findHomography standard binding
    // NOTE(review): toInt() truncates the sub-pixel Point2f coordinates
    // before the homography fit, losing precision — confirm this is intended.
    final srcVec = cv.VecPoint.fromList(
      srcPointsList.map((p) => cv.Point(p.x.toInt(), p.y.toInt())).toList(),
    );
    final dstVec = cv.VecPoint.fromList(
      dstPointsList.map((p) => cv.Point(p.x.toInt(), p.y.toInt())).toList(),
    );
    // RANSAC tolerates outlier correspondences from badly fitted ellipses.
    final M = cv.findHomography(
      cv.Mat.fromVec(srcVec),
      cv.Mat.fromVec(dstVec),
      method: cv.RANSAC,
    );
    if (M.isEmpty) {
      return await correctPerspectiveUsingOvals(imagePath);
    }
    final corrected = cv.warpPerspective(src, M, (side, side));
    final tempDir = await getTemporaryDirectory();
    final timestamp = DateTime.now().millisecondsSinceEpoch;
    final outputPath = '${tempDir.path}/corrected_mesh_$timestamp.jpg';
    cv.imwrite(outputPath, corrected);
    return outputPath;
  } catch (e) {
    print('Erreur correction perspective maillage concentrique: $e');
    return imagePath;
  }
}
/// Corrects perspective by detecting the 4 corners of the sheet
/// (quadrilateral).
///
/// This method looks for the largest 4-sided polygon (the paper's edge)
/// and warps it into a perfect square. Falls back to
/// [correctPerspectiveUsingCircles] when no quadrilateral is found or on
/// any error.
Future<String> correctPerspectiveUsingQuadrilateral(String imagePath) async {
  try {
    final src = cv.imread(imagePath, flags: cv.IMREAD_COLOR);
    if (src.isEmpty) throw Exception("Impossible de charger l'image");
    final gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY);
    // Stronger blur to ignore internal details (rings, bullet holes)
    final blurred = cv.gaussianBlur(gray, (9, 9), 0);
    // Canny edge detector, hysteresis bounds derived from Otsu's threshold
    final thresh = cv.threshold(
      blurred,
      0,
      255,
      cv.THRESH_BINARY | cv.THRESH_OTSU,
    );
    final edges = cv.canny(blurred, thresh.$1 * 0.5, thresh.$1);
    // Close gaps in the sheet's edges (they can be broken by lighting)
    final kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5));
    final closedEdges = cv.morphologyEx(edges, cv.MORPH_CLOSE, kernel);
    // Find contours
    final contoursResult = cv.findContours(
      closedEdges,
      cv.RETR_EXTERNAL,
      cv.CHAIN_APPROX_SIMPLE,
    );
    final contours = contoursResult.$1;
    // Keep the largest 4-vertex polygon covering enough of the frame.
    cv.VecPoint? bestQuad;
    double maxArea = 0;
    final minArea = src.rows * src.cols * 0.1; // At least 10% of the image
    for (final contour in contours) {
      final area = cv.contourArea(contour);
      if (area < minArea) continue;
      final peri = cv.arcLength(contour, true);
      // Polygonal approximation (tolerance = 4% of the perimeter)
      final approx = cv.approxPolyDP(contour, 0.04 * peri, true);
      if (approx.length == 4) {
        if (area > maxArea) {
          maxArea = area;
          bestQuad = approx;
        }
      }
    }
    // Fallback
    if (bestQuad == null) {
      print(
        "Aucun papier quadrilatère détecté, on utilise les cercles à la place.",
      );
      return await correctPerspectiveUsingCircles(imagePath);
    }
    // Convert to List<cv.Point>
    final List<cv.Point> srcPoints = [];
    for (int i = 0; i < bestQuad.length; i++) {
      srcPoints.add(bestQuad[i]);
    }
    _sortPoints(srcPoints);
    // Calculate max width and height of the detected quadrilateral
    double widthA = _distanceCV(srcPoints[2], srcPoints[3]);
    double widthB = _distanceCV(srcPoints[1], srcPoints[0]);
    int dstWidth = math.max(widthA, widthB).toInt();
    double heightA = _distanceCV(srcPoints[1], srcPoints[2]);
    double heightB = _distanceCV(srcPoints[0], srcPoints[3]);
    int dstHeight = math.max(heightA, heightB).toInt();
    // Since standard target paper forms a square, we force the resulting warp to be a perfect square.
    int side = math.max(dstWidth, dstHeight);
    final List<cv.Point> dstPoints = [
      cv.Point(0, 0),
      cv.Point(side, 0),
      cv.Point(side, side),
      cv.Point(0, side),
    ];
    final M = cv.getPerspectiveTransform(
      cv.VecPoint.fromList(srcPoints),
      cv.VecPoint.fromList(dstPoints),
    );
    final corrected = cv.warpPerspective(src, M, (side, side));
    final tempDir = await getTemporaryDirectory();
    final timestamp = DateTime.now().millisecondsSinceEpoch;
    final outputPath = '${tempDir.path}/corrected_quad_$timestamp.jpg';
    cv.imwrite(outputPath, corrected);
    return outputPath;
  } catch (e) {
    print('Erreur correction perspective quadrilatère: $e');
    // Fallback
    return await correctPerspectiveUsingCircles(imagePath);
  }
}
/// Euclidean distance between two OpenCV integer points.
double _distanceCV(cv.Point p1, cv.Point p2) {
  return math.sqrt(
    math.pow(p2.x - p1.x, 2) + math.pow(p2.y - p1.y, 2),
  );
}
}

View File

@@ -153,7 +153,7 @@ class OpenCVImpactDetectionService {
);
final contours = contoursResult.$1;
// hierarchy is item2
// hierarchy is $2
for (int i = 0; i < contours.length; i++) {
final contour = contours[i];

View File

@@ -2,9 +2,12 @@ import 'dart:math' as math;
import '../data/models/target_type.dart';
import 'image_processing_service.dart';
import 'opencv_impact_detection_service.dart';
import 'yolo_impact_detection_service.dart';
export 'image_processing_service.dart' show ImpactDetectionSettings, ReferenceImpact, ImpactCharacteristics;
export 'opencv_impact_detection_service.dart' show OpenCVDetectionSettings, OpenCVDetectedImpact;
export 'image_processing_service.dart'
show ImpactDetectionSettings, ReferenceImpact, ImpactCharacteristics;
export 'opencv_impact_detection_service.dart'
show OpenCVDetectionSettings, OpenCVDetectedImpact;
class TargetDetectionResult {
final double centerX; // Relative (0-1)
@@ -52,18 +55,19 @@ class DetectedImpactResult {
class TargetDetectionService {
final ImageProcessingService _imageProcessingService;
final OpenCVImpactDetectionService _opencvService;
final YOLOImpactDetectionService _yoloService;
TargetDetectionService({
ImageProcessingService? imageProcessingService,
OpenCVImpactDetectionService? opencvService,
}) : _imageProcessingService = imageProcessingService ?? ImageProcessingService(),
_opencvService = opencvService ?? OpenCVImpactDetectionService();
YOLOImpactDetectionService? yoloService,
}) : _imageProcessingService =
imageProcessingService ?? ImageProcessingService(),
_opencvService = opencvService ?? OpenCVImpactDetectionService(),
_yoloService = yoloService ?? YOLOImpactDetectionService();
/// Detect target and impacts from an image file
TargetDetectionResult detectTarget(
String imagePath,
TargetType targetType,
) {
TargetDetectionResult detectTarget(String imagePath, TargetType targetType) {
try {
// Detect main target
final mainTarget = _imageProcessingService.detectMainTarget(imagePath);
@@ -84,7 +88,13 @@ class TargetDetectionService {
// Convert impacts to relative coordinates and calculate scores
final detectedImpacts = impacts.map((impact) {
final score = targetType == TargetType.concentric
? _calculateConcentricScore(impact.x, impact.y, centerX, centerY, radius)
? _calculateConcentricScore(
impact.x,
impact.y,
centerX,
centerY,
radius,
)
: _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
return DetectedImpactResult(
@@ -149,9 +159,9 @@ class TargetDetectionService {
// Vertical zones
if (dy < -0.25) return 5; // Head zone (top)
if (dy < 0.0) return 5; // Center mass (upper body)
if (dy < 0.15) return 4; // Body
if (dy < 0.35) return 3; // Lower body
if (dy < 0.0) return 5; // Center mass (upper body)
if (dy < 0.15) return 4; // Body
if (dy < 0.35) return 3; // Lower body
return 0; // Outside target
}
@@ -177,7 +187,13 @@ class TargetDetectionService {
return impacts.map((impact) {
final score = targetType == TargetType.concentric
? _calculateConcentricScoreWithRings(
impact.x, impact.y, centerX, centerY, radius, ringCount)
impact.x,
impact.y,
centerX,
centerY,
radius,
ringCount,
)
: _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
return DetectedImpactResult(
@@ -221,7 +237,10 @@ class TargetDetectionService {
String imagePath,
List<ReferenceImpact> references,
) {
return _imageProcessingService.analyzeReferenceImpacts(imagePath, references);
return _imageProcessingService.analyzeReferenceImpacts(
imagePath,
references,
);
}
/// Detect impacts based on reference characteristics (calibrated detection)
@@ -245,7 +264,13 @@ class TargetDetectionService {
return impacts.map((impact) {
final score = targetType == TargetType.concentric
? _calculateConcentricScoreWithRings(
impact.x, impact.y, centerX, centerY, radius, ringCount)
impact.x,
impact.y,
centerX,
centerY,
radius,
ringCount,
)
: _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
return DetectedImpactResult(
@@ -283,7 +308,13 @@ class TargetDetectionService {
return impacts.map((impact) {
final score = targetType == TargetType.concentric
? _calculateConcentricScoreWithRings(
impact.x, impact.y, centerX, centerY, radius, ringCount)
impact.x,
impact.y,
centerX,
centerY,
radius,
ringCount,
)
: _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
return DetectedImpactResult(
@@ -315,9 +346,7 @@ class TargetDetectionService {
}) {
try {
// Convertir les références au format OpenCV
final refPoints = references
.map((r) => (x: r.x, y: r.y))
.toList();
final refPoints = references.map((r) => (x: r.x, y: r.y)).toList();
final impacts = _opencvService.detectFromReferences(
imagePath,
@@ -328,7 +357,13 @@ class TargetDetectionService {
return impacts.map((impact) {
final score = targetType == TargetType.concentric
? _calculateConcentricScoreWithRings(
impact.x, impact.y, centerX, centerY, radius, ringCount)
impact.x,
impact.y,
centerX,
centerY,
radius,
ringCount,
)
: _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
return DetectedImpactResult(
@@ -343,4 +378,41 @@ class TargetDetectionService {
return [];
}
}
/// Detects impacts with the YOLOv8 model, then assigns each detection a
/// suggested score according to [targetType].
///
/// Returns an empty list on any failure (errors are logged).
Future<List<DetectedImpactResult>> detectImpactsWithYOLO(
  String imagePath,
  TargetType targetType,
  double centerX,
  double centerY,
  double radius,
  int ringCount,
) async {
  try {
    final impacts = await _yoloService.detectImpacts(imagePath);
    final results = <DetectedImpactResult>[];
    for (final impact in impacts) {
      // Score depends on the target style: concentric rings vs silhouette.
      final suggested = targetType == TargetType.concentric
          ? _calculateConcentricScoreWithRings(
              impact.x,
              impact.y,
              centerX,
              centerY,
              radius,
              ringCount,
            )
          : _calculateSilhouetteScore(impact.x, impact.y, centerX, centerY);
      results.add(
        DetectedImpactResult(
          x: impact.x,
          y: impact.y,
          radius: impact.radius,
          suggestedScore: suggested,
        ),
      );
    }
    return results;
  } catch (e) {
    print('Erreur détection YOLOv8: $e');
    return [];
  }
}
}

View File

@@ -0,0 +1,174 @@
import 'dart:io';
import 'dart:math' as math;
import 'dart:typed_data';
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:image/image.dart' as img;
import 'target_detection_service.dart';
/// Detects bullet impacts on a target photo with a YOLO (v11/v8) model
/// running through TensorFlow Lite.
///
/// The interpreter is loaded lazily on first use and cached afterwards.
class YOLOImpactDetectionService {
  Interpreter? _interpreter;

  /// Primary model asset; [init] falls back to a YOLOv8 asset if missing.
  static const String modelPath = 'assets/models/yolov11n_impact.tflite';
  static const String labelsPath = 'assets/models/labels.txt';

  /// Model input is a square RGB image of this side length.
  static const int _inputSize = 640;

  /// Minimum confidence for a raw detection to be kept.
  static const double _confidenceThreshold = 0.25;

  /// IoU above which two detections are treated as duplicates during NMS.
  static const double _nmsIouThreshold = 0.45;

  /// Loads the TFLite interpreter (idempotent).
  ///
  /// Tries [modelPath] first, then the YOLOv8 asset. On total failure the
  /// interpreter stays null and [detectImpacts] returns an empty list.
  Future<void> init() async {
    if (_interpreter != null) return;
    try {
      // Try loading the specific YOLOv11 model first, fallback to v8 if not found
      try {
        _interpreter = await Interpreter.fromAsset(modelPath);
      } catch (e) {
        print('YOLOv11 model not found at $modelPath, trying YOLOv8 fallback');
        _interpreter = await Interpreter.fromAsset(
          'assets/models/yolov8n_impact.tflite',
        );
      }
      print('YOLO Interpreter loaded successfully');
    } catch (e) {
      print('Error loading YOLO model: $e');
    }
  }

  /// Runs the model on the image at [imagePath] and returns the detected
  /// impacts with relative (0-1) coordinates.
  ///
  /// `suggestedScore` is left at 0; callers compute the actual score.
  /// Returns an empty list on any failure (errors are logged).
  Future<List<DetectedImpactResult>> detectImpacts(String imagePath) async {
    if (_interpreter == null) await init();
    if (_interpreter == null) return [];
    try {
      final bytes = File(imagePath).readAsBytesSync();
      final originalImage = img.decodeImage(bytes);
      if (originalImage == null) return [];
      // YOLOv8/v11 expects a fixed-size square input.
      final resizedImage = img.copyResize(
        originalImage,
        width: _inputSize,
        height: _inputSize,
      );
      // Prepare input tensor (normalized float32 RGB).
      final input = _imageToByteListFloat32(resizedImage, _inputSize);
      // Raw YOLO output shape is [1, 4 + num_classes, anchors]; with a
      // single "impact" class that is [1, 5, 8400].
      final output =
          List<double>.filled(1 * 5 * 8400, 0).reshape([1, 5, 8400]);
      _interpreter!.run(input, output);
      return _processOutput(
        output[0],
        originalImage.width,
        originalImage.height,
      );
    } catch (e) {
      print('Error during YOLO inference: $e');
      return [];
    }
  }

  /// Converts the raw [output] tensor ([x, y, w, h, conf] rows, one column
  /// per anchor, coordinates in model-input pixels) into NMS-filtered
  /// results with relative coordinates.
  ///
  /// [imgWidth]/[imgHeight] are currently unused: dividing by the model
  /// input size already yields relative coordinates.
  List<DetectedImpactResult> _processOutput(
    List<List<double>> output,
    int imgWidth,
    int imgHeight,
  ) {
    final List<_Detection> candidates = [];
    // Derive the anchor count from the tensor instead of hard-coding 8400,
    // so other model resolutions keep working.
    final int numAnchors = output[4].length;
    for (int i = 0; i < numAnchors; i++) {
      final double confidence = output[4][i];
      if (confidence > _confidenceThreshold) {
        candidates.add(
          _Detection(
            x: output[0][i],
            y: output[1][i],
            w: output[2][i],
            h: output[3][i],
            confidence: confidence,
          ),
        );
      }
    }
    // Apply Non-Max Suppression (NMS)
    final List<_Detection> suppressed = _nms(candidates);
    return suppressed
        .map(
          (det) => DetectedImpactResult(
            x: det.x / _inputSize,
            y: det.y / _inputSize,
            // Fixed display radius; the model's box size is ignored.
            radius: 5.0,
            suggestedScore: 0,
          ),
        )
        .toList();
  }

  /// Greedy non-maximum suppression: keeps the highest-confidence member
  /// of each overlapping cluster (IoU > [_nmsIouThreshold]).
  ///
  /// Note: sorts [detections] in place by descending confidence.
  List<_Detection> _nms(List<_Detection> detections) {
    if (detections.isEmpty) return [];
    detections.sort((a, b) => b.confidence.compareTo(a.confidence));
    final List<_Detection> selected = [];
    final List<bool> active = List.filled(detections.length, true);
    for (int i = 0; i < detections.length; i++) {
      if (!active[i]) continue;
      selected.add(detections[i]);
      for (int j = i + 1; j < detections.length; j++) {
        if (!active[j]) continue;
        if (_iou(detections[i], detections[j]) > _nmsIouThreshold) {
          active[j] = false;
        }
      }
    }
    return selected;
  }

  /// Intersection-over-union of two center-format boxes.
  ///
  /// Returns 0.0 for degenerate (zero-area) box pairs instead of NaN.
  double _iou(_Detection a, _Detection b) {
    final double areaA = a.w * a.h;
    final double areaB = b.w * b.h;
    final double x1 = math.max(a.x - a.w / 2, b.x - b.w / 2);
    final double y1 = math.max(a.y - a.h / 2, b.y - b.h / 2);
    final double x2 = math.min(a.x + a.w / 2, b.x + b.w / 2);
    final double y2 = math.min(a.y + a.h / 2, b.y + b.h / 2);
    final double intersection =
        math.max(0.0, x2 - x1) * math.max(0.0, y2 - y1);
    final double union = areaA + areaB - intersection;
    if (union <= 0) return 0.0; // guard: 0/0 would be NaN
    return intersection / union;
  }

  /// Flattens [image] into a normalized (0-1) RGB float32 buffer in
  /// row-major HWC order, returned as its backing bytes for the interpreter.
  Uint8List _imageToByteListFloat32(img.Image image, int inputSize) {
    // Write directly into the Float32List; the previous extra
    // Float32List.view over the same buffer was redundant.
    final convertedBytes = Float32List(inputSize * inputSize * 3);
    int pixelIndex = 0;
    for (int i = 0; i < inputSize; i++) {
      for (int j = 0; j < inputSize; j++) {
        final pixel = image.getPixel(j, i);
        convertedBytes[pixelIndex++] = pixel.r / 255.0;
        convertedBytes[pixelIndex++] = pixel.g / 255.0;
        convertedBytes[pixelIndex++] = pixel.b / 255.0;
      }
    }
    return convertedBytes.buffer.asUint8List();
  }
}
/// Raw YOLO detection in model-input pixel coordinates.
///
/// [x]/[y] are the box center; [w]/[h] its size; [confidence] the model's
/// objectness/class score. Immutable value class, hence const constructor.
class _Detection {
  final double x, y, w, h, confidence;
  const _Detection({
    required this.x,
    required this.y,
    required this.w,
    required this.h,
    required this.confidence,
  });
}

View File

@@ -7,6 +7,7 @@ list(APPEND FLUTTER_PLUGIN_LIST
)
list(APPEND FLUTTER_FFI_PLUGIN_LIST
tflite_flutter
)
set(PLUGIN_BUNDLED_LIBRARIES)

View File

@@ -536,6 +536,14 @@ packages:
url: "https://pub.dev"
source: hosted
version: "2.2.0"
quiver:
dependency: transitive
description:
name: quiver
sha256: ea0b925899e64ecdfbf9c7becb60d5b50e706ade44a85b2363be2a22d88117d2
url: "https://pub.dev"
source: hosted
version: "3.2.2"
sky_engine:
dependency: transitive
description: flutter
@@ -653,6 +661,14 @@ packages:
url: "https://pub.dev"
source: hosted
version: "0.7.9"
tflite_flutter:
dependency: "direct main"
description:
name: tflite_flutter
sha256: ffb8651fdb116ab0131d6dc47ff73883e0f634ad1ab12bb2852eef1bbeab4a6a
url: "https://pub.dev"
source: hosted
version: "0.10.4"
typed_data:
dependency: transitive
description:

View File

@@ -64,6 +64,9 @@ dependencies:
# Image processing for impact detection
image: ^4.1.7
# Machine Learning for YOLOv8
tflite_flutter: ^0.10.4
dev_dependencies:
flutter_test:
sdk: flutter

View File

@@ -0,0 +1,12 @@
import 'package:opencv_dart/opencv_dart.dart' as cv;
// Scratch probe: figure out which argument types the opencv_dart
// findHomography binding accepts.
void main() {
  var p1 = cv.VecPoint.fromList([cv.Point(0, 0), cv.Point(1, 1)]);
  var p2 = cv.VecPoint2f.fromList([cv.Point2f(0, 0), cv.Point2f(1, 1)]);
  // Is it p1.mat ?
  // Or is it cv.findHomography(p1, p1) but actually needs specific types ?
  // Approach tried here: wrap both vectors in Mats via Mat.fromVec.
  cv.Mat mat1 = cv.Mat.fromVec(p1);
  cv.Mat mat2 = cv.Mat.fromVec(p2);
  cv.findHomography(mat1, mat2);
}

View File

@@ -0,0 +1,7 @@
import 'package:opencv_dart/opencv_dart.dart' as cv;
/// Smoke test: confirm these opencv_dart contour bindings are exported.
void main() {
  // Printing a tear-off proves the symbol resolves without calling it.
  for (final binding in [cv.approxPolyDP, cv.arcLength, cv.contourArea]) {
    print(binding);
  }
}

View File

@@ -0,0 +1,5 @@
import 'package:opencv_dart/opencv_dart.dart' as cv;
// Smoke test: confirm the findHomography binding is exported by opencv_dart.
void main() {
  print(cv.findHomography);
}

View File

@@ -7,6 +7,7 @@ list(APPEND FLUTTER_PLUGIN_LIST
)
list(APPEND FLUTTER_FFI_PLUGIN_LIST
tflite_flutter
)
set(PLUGIN_BUNDLED_LIBRARIES)