cpp branch

This commit is contained in:
ackimixs
2024-01-14 13:11:41 +01:00
parent 3285e91ae2
commit 6b60bd175d
30 changed files with 223 additions and 496 deletions

5
.gitignore vendored
View File

@@ -1,4 +1 @@
/venv/
/.idea/
/.vscode/
*.xml
/cmake-build-debug/

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 619 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 673 B

30
CMakeLists.txt Normal file
View File

@@ -0,0 +1,30 @@
# Build configuration for the plant-pot detection tools
# (camera calibration + ArUco marker detector).
cmake_minimum_required(VERSION 3.16...3.27)
project(detection_pot LANGUAGES CXX)

# C++17 is required (calibration.cpp uses std::filesystem).
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

find_package(OpenCV REQUIRED)

# Camera-calibration tool: produces calibration_results.yaml from
# chessboard photos.
add_executable(calibration calibration.cpp)
target_include_directories(calibration PRIVATE ${OpenCV_INCLUDE_DIRS})
target_link_libraries(calibration PRIVATE ${OpenCV_LIBS})

# ArUco marker detection tool: consumes the calibration results.
add_executable(arucoDetector arucoDetector.cpp)
target_include_directories(arucoDetector PRIVATE ${OpenCV_INCLUDE_DIRS})
target_link_libraries(arucoDetector PRIVATE ${OpenCV_LIBS})

Binary file not shown.

View File

@@ -1,30 +0,0 @@
# Label map (protobuf text format) for the plant-pot object detector.
# Maps integer class ids 1-6 to pot/plant states: potted plants, fragile
# plants, and empty pots, each either fallen ("couchee"/"couche") or
# upright ("debout"). The runtime strings must match the training data.
item {
name: "plante_dans_pot_couchee",
id: 1,
display_name: "plante_dans_pot_couchee"
}
item {
name: "plante_dans_pot_debout",
id: 2,
display_name: "plante_dans_pot_debout"
}
item {
name: "plante_fragile_couchee",
id: 3,
display_name: "plante_fragile_couchee"
}
item {
name: "plante_fragile_debout",
id: 4,
display_name: "plante_fragile_debout"
}
item {
name: "pot_vide_couche",
id: 5,
display_name: "pot_vide_couche"
}
item {
name: "pot_vide_debout",
id: 6,
display_name: "pot_vide_debout"
}

View File

@@ -1,8 +0,0 @@
# Detection de pot | Coupe de france de robotique
### Description
Ce repo correspond à la détection de pots de fleurs pour la Coupe de France de robotique 2023.
Réalisé par la team Modelec !
### Tech
OpenCV (Python bindings), version 4.8

Binary file not shown.

Before

Width:  |  Height:  |  Size: 104 KiB

137
aruco.py
View File

@@ -1,137 +0,0 @@
import cv2 as cv
import cv2.aruco as aruco
import numpy as np

# Camera focal length in pixels.
# Measure it with get_the_focal.py.
# FOCAL_LENGTH = 1444  # Maxime's Pixel 7 Pro phone
FOCAL_LENGTH = 1470
# FOCAL_LENGTH = 600

# Marker id -> [display label, physical tag width in cm].
arucoTagMapping = {
    47: ["Panneau solaire", 3.62],
    36: ["Fleur blanche", 2],
    13: ["Fleur violette", 2],  # label casing fixed ("FLeur" -> "Fleur")
}

# Open the default camera (0 is usually the built-in webcam).
cap = cv.VideoCapture(0)

# ArUco dictionary + detector.
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters()
detector = aruco.ArucoDetector(aruco_dict, parameters)

while True:
    # Grab a frame; stop cleanly if the stream ends (the original crashed
    # in cvtColor when cap.read() returned a None frame).
    ret, frame = cap.read()
    if not ret or frame is None:
        break

    # Detection works on grayscale.
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = detector.detectMarkers(gray)

    if ids is not None and len(corners) > 0:
        # BUG FIX: the original nested this whole loop inside
        # `for i in range(len(ids))`, re-processing and re-drawing every
        # marker len(ids) times per frame.
        for (markerCorner, markerID) in zip(corners, ids.flatten()):
            if markerID not in arucoTagMapping:
                continue
            try:
                tagCorners = markerCorner.reshape((4, 2))
            except ValueError:  # was a bare except; only reshape can fail here
                print("Error with the corners")
                continue

            (topLeft, topRight, bottomRight, bottomLeft) = tagCorners
            topRight = (int(topRight[0]), int(topRight[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))

            center_x = int((topLeft[0] + topRight[0] + bottomRight[0] + bottomLeft[0]) / 4)
            center_y = int((topLeft[1] + topRight[1] + bottomRight[1] + bottomLeft[1]) / 4)

            # Apparent tag width in pixels (length of the top edge).
            P = np.sqrt((topRight[0] - topLeft[0]) ** 2 + (topRight[1] - topLeft[1]) ** 2)
            # Pinhole model: distance = real width * focal / apparent width.
            D = (arucoTagMapping[markerID][1] * FOCAL_LENGTH) / P

            tag_position_x = (topRight[0] + topLeft[0]) / 2
            tag_position_y = (topRight[1] + bottomRight[1]) / 2
            image_width = frame.shape[1]
            image_height = frame.shape[0]

            # Horizontal angle from the image center, assuming a 2*60 degree
            # horizontal FOV -- TODO confirm the camera's actual FOV.
            angle_to_tag_horizontal = np.arctan2(tag_position_x - (image_width / 2),
                                                 image_width / (2 * np.tan(np.radians(60))))
            angle_to_tag_horizontal_deg = np.degrees(angle_to_tag_horizontal)

            # In-plane rotation of the tag from its top-edge direction.
            dx = topRight[0] - topLeft[0]
            dy = topRight[1] - topLeft[1]
            angle_degrees = np.degrees(np.arctan2(dy, dx))

            # Classify the tag orientation (user-facing strings kept as-is).
            sens_du_tag = ""
            if -15 < angle_degrees < 15:
                sens_du_tag = "debout"
            elif 15 < angle_degrees < 60:
                sens_du_tag = "penche a droite"
            elif 60 < angle_degrees < 110:
                sens_du_tag = "tombe a droite"
            elif 110 < angle_degrees < 180:
                sens_du_tag = "a l'envers"
            elif -180 < angle_degrees < -110:
                sens_du_tag = "a l'envers"
            elif -110 < angle_degrees < -60:
                sens_du_tag = "tombe a gauche"
            elif -60 < angle_degrees < -15:
                sens_du_tag = "penche a gauche"

            # Overlay label, distance, horizontal angle and orientation.
            cv.putText(frame, f"{arucoTagMapping[markerID][0]}, id : {markerID}", (topLeft[0], topLeft[1] - 90),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            distance_str = "{:.2f}".format(D)
            cv.putText(frame, f"Distance : {distance_str} cm", (topLeft[0], topLeft[1] - 60),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            angle_str = "{:.2f}".format(angle_to_tag_horizontal_deg)
            cv.putText(frame, f"Angle horizontale : {angle_str} degrees", (topLeft[0], topLeft[1] - 30),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            cv.putText(frame, f"Le tag est {sens_du_tag}", (topLeft[0], topLeft[1]),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

    cv.imshow('Frame', frame)
    # 'q' quits.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close the window.
cap.release()
cv.destroyAllWindows()

97
arucoDetector.cpp Normal file
View File

@@ -0,0 +1,97 @@
#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <cmath>
#include <iostream>
#include <vector>

// Detects DICT_4X4_50 ArUco markers from the camera, estimates each marker's
// pose with solvePnP using the stored calibration, and prints distance + yaw.
int main()
{
    cv::VideoCapture cap(2); // Open camera index 2 (change the argument if using a different camera)
    if (!cap.isOpened()) {
        std::cerr << "Error opening camera." << std::endl;
        return -1;
    }
    cv::namedWindow("ArUco Detection", cv::WINDOW_NORMAL);

    // Intrinsics produced by the calibration tool.
    cv::Mat cameraMatrix, distCoeffs;
    cv::FileStorage fs("../calibration_images/calibration_results.yaml", cv::FileStorage::READ);
    if (fs.isOpened()) {
        fs["cameraMatrix"] >> cameraMatrix;
        fs["distCoeffs"] >> distCoeffs;
        fs.release();
    } else {
        std::cerr << "Error reading calibration file." << std::endl;
        return -1;
    }

    // Marker side length; solvePnP translations come out in this unit.
    // NOTE(review): 0.006 would be a 6 mm tag -- confirm the real size/unit,
    // since the printout claims "meters".
    float markerLength = 0.006;

    // Marker corners in the marker's own frame (Z = 0 plane), ordered to
    // match detectMarkers output: top-left, top-right, bottom-right, bottom-left.
    cv::Mat objPoints(4, 1, CV_32FC3);
    objPoints.ptr<cv::Vec3f>(0)[0] = cv::Vec3f(-markerLength/2.f, markerLength/2.f, 0);
    objPoints.ptr<cv::Vec3f>(0)[1] = cv::Vec3f(markerLength/2.f, markerLength/2.f, 0);
    objPoints.ptr<cv::Vec3f>(0)[2] = cv::Vec3f(markerLength/2.f, -markerLength/2.f, 0);
    objPoints.ptr<cv::Vec3f>(0)[3] = cv::Vec3f(-markerLength/2.f, -markerLength/2.f, 0);

    cv::aruco::DetectorParameters detectorParams = cv::aruco::DetectorParameters();
    cv::aruco::Dictionary dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::aruco::ArucoDetector detector(dictionary, detectorParams);

    while (true) {
        cv::Mat frame;
        cap >> frame; // Capture frame from the camera
        if (frame.empty()) {
            std::cerr << "Error capturing frame." << std::endl;
            break;
        }

        // NOTE: the original also converted the frame to grayscale here but
        // never used the result; detectMarkers handles conversion internally.
        std::vector<int> markerIds;
        std::vector<std::vector<cv::Point2f>> markerCorners;
        detector.detectMarkers(frame, markerCorners, markerIds);

        if (!markerIds.empty()) {
            cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
            size_t nMarkers = markerCorners.size();
            for (size_t i = 0; i < nMarkers; i++) {
                cv::Mat rvec, tvec;
                cv::solvePnP(objPoints, markerCorners.at(i), cameraMatrix, distCoeffs, rvec, tvec);
                cv::drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec, tvec, markerLength/2.f);

                // BUG FIX: the original passed the rotation matrix back
                // through Rodrigues, which merely recovers the rotation
                // vector -- not Euler angles -- so the printed "yaw" was
                // meaningless. Derive yaw from the rotation matrix instead.
                cv::Mat R;
                cv::Rodrigues(rvec, R);
                double yaw = std::atan2(R.at<double>(1, 0), R.at<double>(0, 0)) * (180.0 / CV_PI);

                // Z component of the translation: distance along the optical
                // axis, in the same unit as markerLength.
                double distance = tvec.at<double>(2, 0);
                std::cout << "Distance to ArUco marker " << markerIds[i] << ": " << distance << " meters" << ", " << yaw << " degrees" << std::endl;
            }
        }

        cv::imshow("ArUco Detection", frame);
        if (cv::waitKey(30) == 27) // Press 'Esc' to exit
            break;
    }

    cap.release();
    cv::destroyAllWindows();
    return 0;
}

80
calibration.cpp Normal file
View File

@@ -0,0 +1,80 @@
#include <iostream>
#include <filesystem>
#include <opencv2/opencv.hpp>

// Chessboard camera calibration: scans a directory for .jpg/.png images,
// detects a 9x6 inner-corner chessboard in each, runs cv::calibrateCamera,
// and writes calibration_results.yaml into that same directory.
int main(int argc, char *argv[])
{
    if (argc < 2) {
        std::cout << "Usage: " << argv[0] << " <directory>" << std::endl;
        return 1;
    }

    // Number of inner chessboard corners (width x height).
    cv::Size chessboardSize(9, 6);
    cv::Size imgSize;

    // Per-image correspondences fed to calibrateCamera.
    std::vector<std::vector<cv::Point3f>> objectPoints; // 3D world points
    std::vector<std::vector<cv::Point2f>> imagePoints;  // 2D image points

    // Ideal corner grid in the Z=0 plane. Square size is one arbitrary unit,
    // so extrinsic translations come out in "squares", not meters.
    std::vector<cv::Point3f> worldPoints;
    for (int i = 0; i < chessboardSize.height; ++i) {
        for (int j = 0; j < chessboardSize.width; ++j) {
            worldPoints.emplace_back(j, i, 0);
        }
    }

    const std::string pathName = argv[1];
    try {
        for (const auto& entry : std::filesystem::directory_iterator(pathName)) {
            if (!entry.is_regular_file()) {
                continue;
            }
            // Only process ".jpg" / ".png" files.
            std::string fileExtension = entry.path().extension().string();
            if (fileExtension != ".jpg" && fileExtension != ".png") {
                continue;
            }
            std::cout << entry.path() << std::endl;

            cv::Mat image = cv::imread(entry.path().string());
            // BUG FIX: the original passed an empty Mat to cvtColor (crash)
            // when a file could not be decoded.
            if (image.empty()) {
                std::cerr << "Could not read image: " << entry.path() << std::endl;
                continue;
            }
            imgSize = image.size();

            cv::Mat gray;
            cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);

            std::vector<cv::Point2f> corners;
            bool found = cv::findChessboardCorners(gray, chessboardSize, corners);
            if (found) {
                // Refine detected corners to sub-pixel accuracy.
                cv::cornerSubPix(gray, corners, cv::Size(11, 11), cv::Size(-1, -1),
                                 cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
                imagePoints.push_back(corners);
                objectPoints.push_back(worldPoints);
            }
        }
    } catch (const std::filesystem::filesystem_error& e) {
        std::cerr << "Error accessing the directory: " << e.what() << std::endl;
        // BUG FIX: the original fell through and called calibrateCamera anyway.
        return 1;
    }

    // BUG FIX: calibrateCamera fails hard on empty input; report it instead.
    if (objectPoints.empty()) {
        std::cerr << "No chessboard corners found in " << pathName << std::endl;
        return 1;
    }

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    cv::calibrateCamera(objectPoints, imagePoints, imgSize,
                        cameraMatrix, distCoeffs, rvecs, tvecs);

    // Persist the intrinsics next to the input images.
    cv::FileStorage fs(pathName + "/calibration_results.yaml", cv::FileStorage::WRITE);
    fs << "cameraMatrix" << cameraMatrix;
    fs << "distCoeffs" << distCoeffs;
    fs.release(); // Release the file
    return 0;
}

View File

@@ -0,0 +1,15 @@
%YAML:1.0
---
cameraMatrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 1.5754108670997639e+03, 0., 9.7626121802635407e+02, 0.,
1.5717224359398226e+03, 5.7135445053109106e+02, 0., 0., 1. ]
distCoeffs: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ 4.3651647364245744e-01, -3.1772901593595240e+00,
2.1070001064204457e-03, 1.8492055868427389e-03,
6.5313300440393744e+00 ]

Binary file not shown.

After

Width:  |  Height:  |  Size: 258 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 263 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 256 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 253 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 263 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 242 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 236 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 265 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 246 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 252 KiB

View File

@@ -1,22 +0,0 @@
import numpy as np
import cv2

# Stream frames from the default webcam until 'q' is pressed.
capture = cv2.VideoCapture(0)
if not capture.isOpened():
    print("Cannot open camera")
    exit()

while True:
    ok, image = capture.read()
    if not ok:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    cv2.imshow("frame", image)
    if cv2.waitKey(1) == ord("q"):
        break

capture.release()
cv2.destroyAllWindows()

View File

@@ -1,72 +0,0 @@
import cv2 as cv
import cv2.aruco as aruco
import numpy as np

# Actual width of the Aruco tag in some unit (like centimeters)
W = 20
# Actual distance from the camera to the object in the same unit as W
D = 150

# Open the default camera (0 is usually the built-in webcam).
cap = cv.VideoCapture(0)

# ArUco dictionary + detector.
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters()
detector = aruco.ArucoDetector(aruco_dict, parameters)

while True:
    # Grab a frame; stop cleanly if the stream ends (the original crashed
    # in cvtColor when cap.read() returned a None frame).
    ret, frame = cap.read()
    if not ret or frame is None:
        break

    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = detector.detectMarkers(gray)

    if ids is not None:
        aruco.drawDetectedMarkers(frame, corners, ids)
        # BUG FIX: the original nested this loop inside
        # `for i in range(len(ids))`, recomputing the focal len(ids) times,
        # and rebound `corners` inside the loop, shadowing the detections.
        for markerCorners, markerID in zip(corners, ids.flatten()):
            try:
                tagCorners = markerCorners.reshape((4, 2))
            except ValueError:  # was a bare except; only reshape can fail here
                continue
            (topLeft, topRight, bottomRight, bottomLeft) = tagCorners
            # Convert the top edge endpoints to integer pixel coordinates.
            topRight = (int(topRight[0]), int(topRight[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            # Apparent tag width in pixels (length of the top edge).
            P = np.sqrt((topRight[0] - topLeft[0]) ** 2 + (topRight[1] - topLeft[1]) ** 2)
            # Pinhole model: focal = apparent width * distance / real width.
            F = (P * D) / W
            print(f"The focal length of the camera is: {F} pixels")

    cv.imshow('Frame', frame)
    # 'q' quits.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close the window.
cap.release()
cv.destroyAllWindows()

View File

@@ -1,90 +0,0 @@
import cv2 as cv
import cv2.aruco as aruco
import numpy as np

# Camera focal length in pixels.
# Measure it with get_the_focal.py.
FOCAL_LENGTH = 600
# Physical ArUco tag width; distances are reported in the same unit (cm).
ARUCO_SIZE = 2.2

# Open the default camera (0 is usually the built-in webcam).
cap = cv.VideoCapture(0)

# ArUco dictionary + detector.
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters()
detector = aruco.ArucoDetector(aruco_dict, parameters)

while True:
    # Grab a frame; stop cleanly if the stream ends (the original crashed
    # in cvtColor when cap.read() returned a None frame).
    ret, frame = cap.read()
    if not ret or frame is None:
        break

    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = detector.detectMarkers(gray)

    if ids is not None:
        aruco.drawDetectedMarkers(frame, corners, ids)
        # BUG FIX: the original nested this loop inside
        # `for i in range(len(ids))` (processing each marker len(ids) times)
        # and rebound `corners`, shadowing the detection results.
        for markerCorner, markerID in zip(corners, ids.flatten()):
            tagCorners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = tagCorners
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            center_x = int((topLeft[0] + topRight[0] + bottomRight[0] + bottomLeft[0]) / 4)
            center_y = int((topLeft[1] + topRight[1] + bottomRight[1] + bottomLeft[1]) / 4)
            # Apparent tag width in pixels (length of the top edge).
            P = np.sqrt((topRight[0] - topLeft[0]) ** 2 + (topRight[1] - topLeft[1]) ** 2)
            # Pinhole model: distance = real width * focal / apparent width.
            D = (ARUCO_SIZE * FOCAL_LENGTH) / P
            tag_position_x = (topRight[0] + topLeft[0]) / 2
            tag_position_y = (topRight[1] + bottomRight[1]) / 2
            image_width = frame.shape[1]
            image_height = frame.shape[0]
            # Horizontal angle from the image center, assuming a 2*60 degree
            # horizontal FOV -- TODO confirm the camera's actual FOV.
            angle_to_tag_horizontal = np.arctan2(tag_position_x - (image_width / 2),
                                                 image_width / (2 * np.tan(np.radians(60))))
            angle_to_tag_horizontal_deg = np.degrees(angle_to_tag_horizontal)
            # Overlay the measurements.
            distance_str = "{:.2f}".format(D)
            cv.putText(frame, f"Distance : {distance_str} cm", (topLeft[0], topLeft[1] - 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            angle_str = "{:.2f}".format(angle_to_tag_horizontal_deg)
            cv.putText(frame, f"Angle : {angle_str} degrees", (topLeft[0], topLeft[1] - 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            print(f"id : {markerID} : {angle_to_tag_horizontal_deg} degrees, {D} cm")

    cv.imshow('Frame', frame)
    # 'q' quits.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close the window.
cap.release()
cv.destroyAllWindows()

View File

@@ -1,16 +0,0 @@
import cv2
import sys

# Load the sample photo, display it, and save it again when 's' is pressed.
print("Modelec")
print(f"Your OpenCV version is: {cv2.__version__}")

picture = cv2.imread(cv2.samples.findFile("WIN_20231123_18_41_25_Pro.jpg"))
if picture is None:
    sys.exit("Could not read the image.")

cv2.imshow("Display window", picture)
pressed = cv2.waitKey(0)
if pressed == ord("s"):
    cv2.imwrite("WIN_20231123_18_41_25_Pro.jpg", picture)

View File

@@ -1,42 +0,0 @@
import cv2 as cv2
import numpy as np
import tensorflow as tf
# NOTE(review): '.tfrecord' usually names a TFRecord *dataset*, not a
# SavedModel directory -- tf.saved_model.load will fail unless this path is
# actually a SavedModel; confirm the artifact format.
model = tf.saved_model.load('Pot-plante.tfrecord')
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Preprocess the frame for the model input (assumes a 300x300 BGR input
    # normalized to [-1, 1] -- TODO confirm against the trained model).
    input_image = cv2.resize(frame, (300, 300))
    input_image = input_image / 127.5 - 1.0  # Normalization
    # Run object detection with the TensorFlow model on a single-image batch.
    detections = model(tf.convert_to_tensor(np.expand_dims(input_image, axis=0), dtype=tf.float32))
    # Draw the bounding boxes on the frame; boxes are [ymin, xmin, ymax, xmax]
    # in coordinates relative to the input image.
    # NOTE(review): every box is drawn regardless of its confidence --
    # consider filtering on 'detection_scores'.
    for detection in detections['detection_boxes'][0].numpy():
        ymin, xmin, ymax, xmax = detection
        xmin = int(xmin * frame.shape[1])
        xmax = int(xmax * frame.shape[1])
        ymin = int(ymin * frame.shape[0])
        ymax = int(ymax * frame.shape[0])
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
    # Show the resulting frame; 'q' quits.
    cv2.imshow("Object Detection", frame)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()

50
reco.py
View File

@@ -1,50 +0,0 @@
import cv2 as cv
import argparse
def detectAndDisplay(frame):
    # Detect faces on the equalized grayscale frame, then eyes within each
    # face ROI, draw the results and show the annotated frame.
    # Uses the module-level face_cascade / eyes_cascade classifiers.
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.equalizeHist(gray)
    #-- Detect faces
    for (x, y, w, h) in face_cascade.detectMultiScale(gray):
        face_center = (x + w // 2, y + h // 2)
        frame = cv.ellipse(frame, face_center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
        face_roi = gray[y:y + h, x:x + w]
        #-- In each face, detect eyes
        for (ex, ey, ew, eh) in eyes_cascade.detectMultiScale(face_roi):
            eye_center = (x + ex + ew // 2, y + ey + eh // 2)
            radius = int(round((ew + eh) * 0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
    cv.imshow('Capture - Face detection', frame)
# Parse cascade paths and camera index from the command line.
parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
parser.add_argument('--face_cascade', help='Path to face cascade.', default='haarcascade_frontalface_alt.xml')
parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='haarcascade_eye_tree_eyeglasses.xml')
# "divide" -> "device": typo fix in the user-facing help text.
parser.add_argument('--camera', help='Camera device number.', type=int, default=0)
args = parser.parse_args()

face_cascade_name = args.face_cascade
eyes_cascade_name = args.eyes_cascade
face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()

#-- 1. Load the cascades
if not face_cascade.load(cv.samples.findFile(face_cascade_name)):
    print('--(!)Error loading face cascade')
    exit(0)
if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):
    print('--(!)Error loading eyes cascade')
    exit(0)

camera_device = args.camera
#-- 2. Read the video stream
cap = cv.VideoCapture(camera_device)
# BUG FIX: the original tested `not cap.isOpened` -- the bound method object,
# which is always truthy -- so the error branch could never run.
if not cap.isOpened():
    print('--(!)Error opening video capture')
    exit(0)

while True:
    ret, frame = cap.read()
    if frame is None:
        print('--(!) No captured frame -- Break!')
        break
    detectAndDisplay(frame)
    # ESC quits.
    if cv.waitKey(10) == 27:
        break

View File

@@ -1,25 +0,0 @@
import tensorflow as tf
# Quick inspection tool: parse the first few examples of the Pot-plante
# TFRecord file and print their decoded features.

# Parse one serialized TFRecord example into a feature dict.
def _parse_function(proto):
    # NOTE(review): FixedLenFeature([], ...) assumes exactly ONE object (one
    # label, one bbox) per example; multi-object detection records usually
    # need VarLenFeature for the label/bbox fields -- confirm against the
    # dataset that produced this file.
    keys_to_features = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/format': tf.io.FixedLenFeature([], tf.string),
        'image/object/class/label': tf.io.FixedLenFeature([], tf.int64),  # Make sure the type matches
        'image/object/bbox/xmin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/xmax': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymax': tf.io.FixedLenFeature([], tf.float32),
    }
    parsed_features = tf.io.parse_single_example(proto, keys_to_features)
    return parsed_features

# Load the TFRecords.
tfrecords_path = "Pot-plante.tfrecord"
dataset = tf.data.TFRecordDataset(tfrecords_path)

# Print the parsed contents of each example.
for raw_record in dataset.take(5):  # Take the first 5 examples for illustration
    parsed_record = _parse_function(raw_record.numpy())
    print(parsed_record)