Hey
I'm taking an old project of mine and adding some "Tensorflow magic" to it. Basically I have this car that I control with a pi, and it has attributes like sensors, cameras and servos. I want to add some object detection to the camera feed. First I tried to check if the tensorflow object detection worked in a seperate script. And it worked very well.
However when I tried to implement it in my previous class it gets stuck on the line below which I've added in the show_camera_feed method. It literally will not go past this line.I thought maybe my CPU was overloaded since I have four different subprocesses running at once in this, so I tried disabling two of them, but still the same result. If you have any tips on how to solve this I'd appreciate the tips.
Full classFull project:
https://github.com/Siggerud/RoboCar_2.0
I'm taking an old project of mine and adding some "Tensorflow magic" to it. Basically I have this car that I control with a pi, and it has attributes like sensors, cameras and servos. I want to add some object detection to the camera feed. First I tried to check if the tensorflow object detection worked in a seperate script. And it worked very well.
Code:
import cv2from time import timefrom picamera2 import Picamera2from tflite_support.task import corefrom tflite_support.task import processorfrom tflite_support.task import visionimport utilsmodel = "efficientdet_lite0.tflite" numThreads = 4 dispW = int(1280 / 2) dispH = int(720 / 2) picam2 = Picamera2() picam2.preview_configuration.main.size=(dispW, dispH) picam2.preview_configuration.main.format='RGB888' picam2.preview_configuration.align() picam2.configure("preview") picam2.start() pos = (20,60) font = cv2.FONT_HERSHEY_SIMPLEX height = 1.5 weight = 3 myColor = (255, 0, 0) fps = 0 baseOptions = core.BaseOptions(file_name=model, use_coral=False, num_threads=numThreads) detectionOptions = processor.DetectionOptions(max_results=3, score_threshold=0.5) options = vision.ObjectDetectorOptions(base_options=baseOptions, detection_options=detectionOptions) detector = vision.ObjectDetector.create_from_options(options) while True: tStart = time() im = picam2.capture_array() im = cv2.flip(im, -1) imRGB = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # convert from BGR to RGB image imTensor = vision.TensorImage.create_from_array(imRGB) # create a tensor image myDetections = detector.detect(imTensor) # get the objects that are detected by tensorflow image = utils.visualize(im, myDetections) # create a decorated image with detected objects cv2.putText(im, str(int(fps)) + " FPS", pos, font, height, myColor, weight) cv2.imshow('Camera', im) if cv2.waitKey(1)==ord('q'): break tEnd = time() loopTime = tEnd - tStart fps = 0.9 * fps + 0.1 * 1/loopTime cv2.destroyAllWindows()
Code:
myDetections = self._detector.detect(imTensor)
Full class
Code:
import cv2import osos.environ["LIBCAMERA_LOG_LEVELS"] = "3" #disable info and warning loggingfrom picamera2 import Picamera2from time import timefrom tflite_support.task import corefrom tflite_support.task import processorfrom tflite_support.task import visionimport utilsclass Camera: def __init__(self, resolution, rotation=True): self._dispW, self._dispH = resolution self._centerX = int(self._dispW / 2) self._centerY = int(self._dispH / 2) self._rotation = rotation self._picam2 = None # text on video properties self._colour = (0, 255, 0) self._textPositions = self._set_text_positions() self._font = cv2.FONT_HERSHEY_SIMPLEX self._scale = 1 self._thickness = 1 self._angleText = None self._speedText = None self._turnText = None self._zoomValue = 1.0 self._hudActive = True self._carEnabled = False self._servoEnabled = False self._fps = 0 self._weightPrevFps = 0.9 self._weightNewFps = 0.1 self._fpsPos = (10, 30) self._arrayDict = None self._number_to_turnValue = { 0: "-", 1: "Left", 2: "Right" } #tensorflow variables model = "/home/christian/Python/RoboCar_2.0/src/efficientdet_lite0.tflite" #needs to be full path #TODO: move to config file numThreads = 4 baseOptions = core.BaseOptions(file_name=model, use_coral=False, num_threads=numThreads) detectionOptions = processor.DetectionOptions(max_results=3, score_threshold=0.5) options = vision.ObjectDetectorOptions(base_options=baseOptions, detection_options=detectionOptions) self._detector = vision.ObjectDetector.create_from_options(options) def setup(self): self._picam2 = Picamera2() self._picam2.preview_configuration.main.size = (self._dispW, self._dispH) self._picam2.preview_configuration.main.format = 'RGB888' self._picam2.preview_configuration.align() self._picam2.configure("preview") self._picam2.start() def show_camera_feed(self, shared_array): tStart = time() # start timer for calculating fps # get raw image im = self._picam2.capture_array() if self._rotation: im = cv2.flip(im, -1) #TODO: wrap this in a method imRGB = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # convert from BGR to RGB image imTensor = vision.TensorImage.create_from_array(imRGB) # create a tensor image # This is the line that the code gets stuck on myDetections = self._detector.detect(imTensor) # get the objects that are detected by tensorflow #image = utils.visualize(im, myDetections) # create a decorated image with detected objects # read control values from external classes self._read_control_values_for_video_feed(shared_array) # resize image when zooming if self._zoomValue != 1.0: im = self._get_zoomed_image(im) # add control values to cam feed if self._hudActive: self._add_text_to_cam_feed(im) cv2.imshow("Camera", im) cv2.waitKey(1) # calculate fps self._calculate_fps(tStart) def cleanup(self): cv2.destroyAllWindows() self._picam2.close() def set_car_enabled(self): self._carEnabled = True def set_servo_enabled(self): self._servoEnabled = True def add_array_dict(self, arrayDict): self._arrayDict = arrayDict def _set_text_positions(self): spacingVertical = 30 horizontalCoord = 10 verticalCoord = self._dispH - 15 positions = [] for i in range(4): position = (horizontalCoord, verticalCoord - i * spacingVertical) positions.append(position) return positions def _calculate_fps(self, startTime): endTime = time() loopTime = endTime - startTime self._fps = self._weightPrevFps * self._fps + self._weightNewFps * (1 / loopTime) def _get_zoomed_image(self, image): halfZoomDisplayWidth = int(self._dispW / (2 * self._zoomValue)) halfZoomDisplayHeight = int(self._dispH / (2 * self._zoomValue)) regionOfInterest = image[self._centerY - halfZoomDisplayHeight:self._centerY + halfZoomDisplayHeight, self._centerX - halfZoomDisplayWidth:self._centerX + halfZoomDisplayWidth] im = cv2.resize(regionOfInterest, (self._dispW, self._dispH), cv2.INTER_LINEAR) return im def _add_text_to_cam_feed(self, image): # add control values to camera feed counter = 0 cv2.putText(image, "Zoom: " + str(self._zoomValue) + "x", self._get_origin(counter), self._font, self._scale, self._colour, self._thickness) counter += 1 if self._angleText: cv2.putText(image, self._angleText, self._get_origin(counter), self._font, self._scale, self._colour, self._thickness) counter += 1 if self._speedText: cv2.putText(image, self._speedText, self._get_origin(counter), self._font, self._scale, self._colour, self._thickness) counter += 1 if self._turnText: cv2.putText(image, self._turnText, self._get_origin(counter), self._font, self._scale, self._colour, self._thickness) counter += 1 # display fps cv2.putText(image, self._get_fps(), self._fpsPos, self._font, self._scale, self._colour, self._thickness) def _get_fps(self): return str(int(self._fps)) + " FPS" def _read_control_values_for_video_feed(self, shared_array): if self._carEnabled: self._speedText = "Speed: " + str(int(shared_array[self._arrayDict["speed"]])) + "%" self._turnText = "Turn: " + self._get_turn_value(shared_array[self._arrayDict["turn"]]) if self._servoEnabled: self._angleText = "Angle: " + str(int(shared_array[self._arrayDict["servo"]])) self._hudActive = shared_array[self._arrayDict["HUD"]] self._zoomValue = shared_array[self._arrayDict["Zoom"]] def _get_turn_value(self, number): return self._number_to_turnValue[number] def _get_origin(self, count): return self._textPositions[count]
https://github.com/Siggerud/RoboCar_2.0
Statistics: Posted by Siggerud — Fri Oct 18, 2024 7:53 pm