diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9f21b54
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/venv/
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/Pot-plante.tfrecord b/Pot-plante.tfrecord
new file mode 100644
index 0000000..e758d36
Binary files /dev/null and b/Pot-plante.tfrecord differ
diff --git a/Pot-plante_label_map.pbtxt b/Pot-plante_label_map.pbtxt
new file mode 100644
index 0000000..7c8cc54
--- /dev/null
+++ b/Pot-plante_label_map.pbtxt
@@ -0,0 +1,30 @@
+item {
+  name: "plante_dans_pot_couchee",
+  id: 1,
+  display_name: "plante_dans_pot_couchee"
+}
+item {
+  name: "plante_dans_pot_debout",
+  id: 2,
+  display_name: "plante_dans_pot_debout"
+}
+item {
+  name: "plante_fragile_couchee",
+  id: 3,
+  display_name: "plante_fragile_couchee"
+}
+item {
+  name: "plante_fragile_debout",
+  id: 4,
+  display_name: "plante_fragile_debout"
+}
+item {
+  name: "pot_vide_couche",
+  id: 5,
+  display_name: "pot_vide_couche"
+}
+item {
+  name: "pot_vide_debout",
+  id: 6,
+  display_name: "pot_vide_debout"
+}
diff --git a/modele.py b/modele.py
new file mode 100644
index 0000000..926c522
--- /dev/null
+++ b/modele.py
@@ -0,0 +1,42 @@
+import cv2
+import numpy as np
+import tensorflow as tf
+
+model = tf.saved_model.load('Pot-plante.tfrecord')  # NOTE(review): .tfrecord is a dataset file, not a SavedModel dir — this load will fail; point to the exported SavedModel
+
+cap = cv2.VideoCapture(0)
+if not cap.isOpened():
+    print("Cannot open camera")
+    exit()
+
+while True:
+    ret, frame = cap.read()
+    if not ret:
+        print("Can't receive frame (stream end?). Exiting ...")
+        break
+
+    # Preprocess the frame for the model input
+    input_image = cv2.resize(frame, (300, 300))
+    input_image = input_image / 127.5 - 1.0  # Normalize pixels to [-1, 1]
+
+    # Run object detection with the TensorFlow model
+    detections = model(tf.convert_to_tensor(np.expand_dims(input_image, axis=0), dtype=tf.float32))
+
+    # Draw bounding boxes (normalized [ymin, xmin, ymax, xmax]); NOTE(review): no score threshold — every proposal is drawn
+    for detection in detections['detection_boxes'][0].numpy():
+        ymin, xmin, ymax, xmax = detection
+        xmin = int(xmin * frame.shape[1])
+        xmax = int(xmax * frame.shape[1])
+        ymin = int(ymin * frame.shape[0])
+        ymax = int(ymax * frame.shape[0])
+
+        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
+
+    # Show the resulting frame
+    cv2.imshow("Object Detection", frame)
+
+    if cv2.waitKey(1) == ord("q"):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..e5c0b99
--- /dev/null
+++ b/train.py
@@ -0,0 +1,25 @@
+import tensorflow as tf
+
+# Parse one serialized tf.train.Example from the TFRecord into a feature dict
+def _parse_function(proto):
+    keys_to_features = {
+        'image/encoded': tf.io.FixedLenFeature([], tf.string),
+        'image/format': tf.io.FixedLenFeature([], tf.string),
+        'image/object/class/label': tf.io.FixedLenFeature([], tf.int64),  # NOTE(review): repeated per-object fields usually need VarLenFeature — confirm against the record schema
+        'image/object/bbox/xmin': tf.io.FixedLenFeature([], tf.float32),  # NOTE(review): likewise VarLenFeature if images can contain multiple boxes
+        'image/object/bbox/ymin': tf.io.FixedLenFeature([], tf.float32),
+        'image/object/bbox/xmax': tf.io.FixedLenFeature([], tf.float32),
+        'image/object/bbox/ymax': tf.io.FixedLenFeature([], tf.float32),
+    }
+    parsed_features = tf.io.parse_single_example(proto, keys_to_features)
+    return parsed_features
+
+
+# Load the TFRecords
+tfrecords_path = "Pot-plante.tfrecord"
+dataset = tf.data.TFRecordDataset(tfrecords_path)
+
+# Print the parsed contents of the first few records
+for raw_record in dataset.take(5):  # take the first 5 examples for illustration
+    parsed_record = _parse_function(raw_record.numpy())
+    print(parsed_record)