Try to make the model work

This commit is contained in:
2023-11-23 19:53:09 +01:00
parent 0a10dd99c5
commit 937b80e02b
6 changed files with 104 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/venv/

6
.idea/vcs.xml generated Normal file
View File

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

BIN
Pot-plante.tfrecord Normal file

Binary file not shown.

View File

@@ -0,0 +1,30 @@
item {
  name: "plante_dans_pot_couchee",
  id: 1,
  display_name: "plante_dans_pot_couchee"
}
item {
  name: "plante_dans_pot_debout",
  id: 2,
  display_name: "plante_dans_pot_debout"
}
item {
  name: "plante_fragile_couchee",
  id: 3,
  display_name: "plante_fragile_couchee"
}
item {
  name: "plante_fragile_debout",
  id: 4,
  display_name: "plante_fragile_debout"
}
item {
  name: "pot_vide_couche",
  id: 5,
  display_name: "pot_vide_couche"
}
item {
  name: "pot_vide_debout",
  id: 6,
  display_name: "pot_vide_debout"
}
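
The file above (its name is not shown in the diff) is a TensorFlow Object Detection-style label map with six classes. As an illustration only, here is a minimal sketch of how such a file could be parsed into an id → display_name dictionary; the path label_map.pbtxt and the helper name load_label_map are assumptions, not part of this commit:

import re

def load_label_map(path):
    # Parse a label_map.pbtxt-style file into {id: display_name}.
    with open(path) as f:
        text = f.read()
    labels = {}
    # Each "item { ... }" block carries an id and a display_name.
    for block in re.findall(r"item\s*\{(.*?)\}", text, flags=re.S):
        class_id = int(re.search(r"\bid:\s*(\d+)", block).group(1))
        display = re.search(r'display_name:\s*"([^"]+)"', block).group(1)
        labels[class_id] = display
    return labels

# Expected result for the label map above:
# {1: "plante_dans_pot_couchee", 2: "plante_dans_pot_debout", ..., 6: "pot_vide_debout"}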

42
modele.py Normal file
View File

@@ -0,0 +1,42 @@
import cv2
import numpy as np
import tensorflow as tf

# Load the exported detection model
# (note: tf.saved_model.load expects a SavedModel directory, not a TFRecord file)
model = tf.saved_model.load('Pot-plante.tfrecord')

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()

while True:
    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Preprocess the frame for the model input
    input_image = cv2.resize(frame, (300, 300))
    input_image = input_image / 127.5 - 1.0  # Normalize to [-1, 1]
    # Run object detection with the TensorFlow model
    detections = model(tf.convert_to_tensor(np.expand_dims(input_image, axis=0), dtype=tf.float32))
    # Draw the bounding boxes on the frame
    for detection in detections['detection_boxes'][0].numpy():
        ymin, xmin, ymax, xmax = detection
        xmin = int(xmin * frame.shape[1])
        xmax = int(xmax * frame.shape[1])
        ymin = int(ymin * frame.shape[0])
        ymax = int(ymax * frame.shape[0])
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
    # Show the resulting frame
    cv2.imshow("Object Detection", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
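
The loop above draws every entry of detection_boxes, including low-confidence ones, and without class labels. If the exported model follows the TensorFlow Object Detection API output convention (the returned dict then also carries detection_scores and detection_classes), a helper like the sketch below could filter and label the boxes; the 0.5 threshold and the load_label_map helper sketched earlier are assumptions:

import cv2

SCORE_THRESHOLD = 0.5  # arbitrary cut-off, to be tuned for the trained model

def draw_detections(frame, detections, labels):
    # Draw only the boxes that pass the score threshold and tag them with the class name.
    boxes = detections['detection_boxes'][0].numpy()
    scores = detections['detection_scores'][0].numpy()
    classes = detections['detection_classes'][0].numpy().astype(int)
    h, w = frame.shape[:2]
    for box, score, cls in zip(boxes, scores, classes):
        if score < SCORE_THRESHOLD:
            continue
        ymin, xmin, ymax, xmax = box
        top_left = (int(xmin * w), int(ymin * h))
        bottom_right = (int(xmax * w), int(ymax * h))
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
        cv2.putText(frame, f"{labels.get(cls, str(cls))} {score:.2f}",
                    (top_left[0], max(top_left[1] - 5, 15)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return frame

# In the capture loop (with labels loaded once before the loop):
# frame = draw_detections(frame, detections, labels)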

25
train.py Normal file
View File

@@ -0,0 +1,25 @@
import tensorflow as tf

# Define the function that parses TFRecord examples (similar to what was used to write them)
def _parse_function(proto):
    keys_to_features = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/format': tf.io.FixedLenFeature([], tf.string),
        'image/object/class/label': tf.io.FixedLenFeature([], tf.int64),  # make sure the dtype matches the writer
        'image/object/bbox/xmin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymin': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/xmax': tf.io.FixedLenFeature([], tf.float32),
        'image/object/bbox/ymax': tf.io.FixedLenFeature([], tf.float32),
    }
    parsed_features = tf.io.parse_single_example(proto, keys_to_features)
    return parsed_features

# Load the TFRecords
tfrecords_path = "Pot-plante.tfrecord"
dataset = tf.data.TFRecordDataset(tfrecords_path)

# Print the parsed features for a few TFRecord examples
for raw_record in dataset.take(5):  # take the first 5 examples for illustration
    parsed_record = _parse_function(raw_record.numpy())
    print(parsed_record)
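
One caveat: TFRecords written in the usual TensorFlow Object Detection layout store the bbox and label fields as variable-length lists, since one image can hold several objects, and FixedLenFeature([], ...) then fails to parse them. Whether Pot-plante.tfrecord uses that layout is an assumption, but under it a parsing sketch could look like this:

import tensorflow as tf

def parse_example_varlen(proto):
    # Assumes the usual TF Object Detection layout, where one image can carry
    # several boxes, hence VarLenFeature for the per-object fields.
    features = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/format': tf.io.FixedLenFeature([], tf.string),
        'image/object/class/label': tf.io.VarLenFeature(tf.int64),
        'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
    }
    parsed = tf.io.parse_single_example(proto, features)
    image = tf.io.decode_image(parsed['image/encoded'], channels=3)  # decode the JPEG/PNG bytes
    labels = tf.sparse.to_dense(parsed['image/object/class/label'])
    boxes = tf.stack([tf.sparse.to_dense(parsed['image/object/bbox/' + k])
                      for k in ('ymin', 'xmin', 'ymax', 'xmax')], axis=-1)
    return image, boxes, labels

dataset = tf.data.TFRecordDataset("Pot-plante.tfrecord").map(parse_example_varlen)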